From 9ac1825d23741b1923dda64d58db9624a9f86989 Mon Sep 17 00:00:00 2001 From: RJbalikian <46536937+RJbalikian@users.noreply.github.com> Date: Wed, 30 Oct 2024 11:55:10 -0500 Subject: [PATCH] update docs (including github pages?) --- docs/.readthedocs.yaml | 33 - docs/Makefile | 20 - docs/_build/html/.buildinfo | 4 - docs/_build/html/genindex.html | 759 - docs/_build/html/index.html | 356 - docs/_build/html/objects.inv | Bin 1478 -> 0 bytes docs/_build/html/py-modindex.html | 157 - docs/_build/html/searchindex.js | 1 - docs/_build/html/sprit.html | 1986 --- docs/_build/html/sprit.sprit_cli.html | 183 - docs/_build/html/sprit.sprit_hvsr.html | 1869 --- docs/_build/html/sprit.sprit_jupyter_UI.html | 180 - docs/_build/html/sprit.sprit_plot.html | 257 - .../_build/html/sprit.sprit_streamlit_ui.html | 209 - docs/_build/html/sprit.sprit_tkinter_ui.html | 237 - docs/_build/html/sprit.sprit_utils.html | 303 - docs/_generate_docs.py | 2 +- .../html/search.html => _modules/index.html} | 71 +- docs/_modules/sprit/sprit_cli.html | 224 + docs/_modules/sprit/sprit_hvsr.html | 10021 +++++++++++++ docs/_modules/sprit/sprit_jupyter_UI.html | 2393 +++ docs/_modules/sprit/sprit_plot.html | 1301 ++ docs/_modules/sprit/sprit_streamlit_ui.html | 694 + docs/_modules/sprit/sprit_tkinter_ui.html | 3250 ++++ docs/_modules/sprit/sprit_utils.html | 671 + docs/{index.rst => _sources/index.rst.txt} | 0 docs/{sprit.rst => _sources/sprit.rst.txt} | 0 .../sprit.sprit_cli.rst.txt} | 0 .../sprit.sprit_hvsr.rst.txt} | 0 .../sprit.sprit_jupyter_UI.rst.txt} | 0 .../sprit.sprit_plot.rst.txt} | 0 .../sprit.sprit_streamlit_ui.rst.txt} | 0 .../sprit.sprit_tkinter_ui.rst.txt} | 0 .../sprit.sprit_utils.rst.txt} | 0 docs/{_build/doctrees => }/environment.pickle | Bin 997846 -> 997846 bytes docs/genindex.html | 14 +- docs/{_build/doctrees => }/index.doctree | Bin docs/index.html | 14 +- docs/main.html | 7680 +--------- docs/make.bat | 35 - docs/py-modindex.html | 16 +- docs/search.html | 18 +- docs/{_build/doctrees => }/sprit.doctree | Bin docs/sprit.html | 14 +- docs/sprit.ptyhonfile.html | 106 - .../doctrees => }/sprit.sprit_cli.doctree | Bin docs/sprit.sprit_cli.html | 14 +- .../doctrees => }/sprit.sprit_hvsr.doctree | Bin docs/sprit.sprit_hvsr.html | 14 +- .../sprit.sprit_jupyter_UI.doctree | Bin docs/sprit.sprit_jupyter_UI.html | 14 +- .../doctrees => }/sprit.sprit_plot.doctree | Bin docs/sprit.sprit_plot.html | 14 +- .../sprit.sprit_streamlit_ui.doctree | Bin docs/sprit.sprit_streamlit_ui.html | 14 +- .../sprit.sprit_tkinter_ui.doctree | Bin docs/sprit.sprit_tkinter_ui.html | 14 +- .../doctrees => }/sprit.sprit_utils.doctree | Bin docs/sprit.sprit_utils.html | 14 +- docs/sprit_cli.html | 254 +- docs/sprit_gui.html | 9234 ------------ docs/sprit_hvsr.html | 12266 +--------------- docs/sprit_jupyter_UI.html | 4539 +----- docs/sprit_plot.html | 167 + docs/sprit_streamlit_ui.html | 111 + docs/sprit_tkinter_ui.html | 3075 ++++ docs/sprit_utils.html | 918 +- docs/theme.css | 4 - docs/theme.js | 1 - sprit/ui_tests/RemiTest.py | 44 - sprit/ui_tests/__init__,py | 8 - sprit/ui_tests/remiOverview.py | 314 - sprit/ui_tests/sprit_nice.py | 45 - sprit/ui_tests/sprit_remi.py | 307 - 74 files changed, 23384 insertions(+), 41079 deletions(-) delete mode 100644 docs/.readthedocs.yaml delete mode 100644 docs/Makefile delete mode 100644 docs/_build/html/.buildinfo delete mode 100644 docs/_build/html/genindex.html delete mode 100644 docs/_build/html/index.html delete mode 100644 docs/_build/html/objects.inv delete mode 100644 
docs/_build/html/py-modindex.html delete mode 100644 docs/_build/html/searchindex.js delete mode 100644 docs/_build/html/sprit.html delete mode 100644 docs/_build/html/sprit.sprit_cli.html delete mode 100644 docs/_build/html/sprit.sprit_hvsr.html delete mode 100644 docs/_build/html/sprit.sprit_jupyter_UI.html delete mode 100644 docs/_build/html/sprit.sprit_plot.html delete mode 100644 docs/_build/html/sprit.sprit_streamlit_ui.html delete mode 100644 docs/_build/html/sprit.sprit_tkinter_ui.html delete mode 100644 docs/_build/html/sprit.sprit_utils.html rename docs/{_build/html/search.html => _modules/index.html} (51%) create mode 100644 docs/_modules/sprit/sprit_cli.html create mode 100644 docs/_modules/sprit/sprit_hvsr.html create mode 100644 docs/_modules/sprit/sprit_jupyter_UI.html create mode 100644 docs/_modules/sprit/sprit_plot.html create mode 100644 docs/_modules/sprit/sprit_streamlit_ui.html create mode 100644 docs/_modules/sprit/sprit_tkinter_ui.html create mode 100644 docs/_modules/sprit/sprit_utils.html rename docs/{index.rst => _sources/index.rst.txt} (100%) rename docs/{sprit.rst => _sources/sprit.rst.txt} (100%) rename docs/{sprit.sprit_cli.rst => _sources/sprit.sprit_cli.rst.txt} (100%) rename docs/{sprit.sprit_hvsr.rst => _sources/sprit.sprit_hvsr.rst.txt} (100%) rename docs/{sprit.sprit_jupyter_UI.rst => _sources/sprit.sprit_jupyter_UI.rst.txt} (100%) rename docs/{sprit.sprit_plot.rst => _sources/sprit.sprit_plot.rst.txt} (100%) rename docs/{sprit.sprit_streamlit_ui.rst => _sources/sprit.sprit_streamlit_ui.rst.txt} (100%) rename docs/{sprit.sprit_tkinter_ui.rst => _sources/sprit.sprit_tkinter_ui.rst.txt} (100%) rename docs/{sprit.sprit_utils.rst => _sources/sprit.sprit_utils.rst.txt} (100%) rename docs/{_build/doctrees => }/environment.pickle (97%) rename docs/{_build/doctrees => }/index.doctree (100%) delete mode 100644 docs/make.bat rename docs/{_build/doctrees => }/sprit.doctree (100%) delete mode 100644 docs/sprit.ptyhonfile.html rename docs/{_build/doctrees => }/sprit.sprit_cli.doctree (100%) rename docs/{_build/doctrees => }/sprit.sprit_hvsr.doctree (100%) rename docs/{_build/doctrees => }/sprit.sprit_jupyter_UI.doctree (100%) rename docs/{_build/doctrees => }/sprit.sprit_plot.doctree (100%) rename docs/{_build/doctrees => }/sprit.sprit_streamlit_ui.doctree (100%) rename docs/{_build/doctrees => }/sprit.sprit_tkinter_ui.doctree (100%) rename docs/{_build/doctrees => }/sprit.sprit_utils.doctree (100%) delete mode 100644 docs/sprit_gui.html create mode 100644 docs/sprit_plot.html create mode 100644 docs/sprit_streamlit_ui.html create mode 100644 docs/sprit_tkinter_ui.html delete mode 100644 docs/theme.css delete mode 100644 docs/theme.js delete mode 100644 sprit/ui_tests/RemiTest.py delete mode 100644 sprit/ui_tests/__init__,py delete mode 100644 sprit/ui_tests/remiOverview.py delete mode 100644 sprit/ui_tests/sprit_nice.py delete mode 100644 sprit/ui_tests/sprit_remi.py diff --git a/docs/.readthedocs.yaml b/docs/.readthedocs.yaml deleted file mode 100644 index c26d98f5..00000000 --- a/docs/.readthedocs.yaml +++ /dev/null @@ -1,33 +0,0 @@ -# .readthedocs.yaml -# Read the Docs configuration file -# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details - -# Required -version: 2 - -# Set the OS, Python version and other tools you might need -build: - os: ubuntu-22.04 - tools: - python: "3.12" - # You can also specify other tool versions: - # nodejs: "19" - # rust: "1.64" - # golang: "1.19" - -# Build documentation in the "docs/" directory with Sphinx 
-sphinx: - configuration: docs/conf.py - builder: html - -# Optionally build your docs in additional formats such as PDF and ePub -formats: - - pdf -# - epub - -# Optional but recommended, declare the Python requirements required -# to build your documentation -# See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html -python: - install: - - requirements: docs/requirements.txt \ No newline at end of file diff --git a/docs/Makefile b/docs/Makefile deleted file mode 100644 index d4bb2cbb..00000000 --- a/docs/Makefile +++ /dev/null @@ -1,20 +0,0 @@ -# Minimal makefile for Sphinx documentation -# - -# You can set these variables from the command line, and also -# from the environment for the first two. -SPHINXOPTS ?= -SPHINXBUILD ?= sphinx-build -SOURCEDIR = . -BUILDDIR = _build - -# Put it first so that "make" without argument is like "make help". -help: - @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) - -.PHONY: help Makefile - -# Catch-all target: route all unknown targets to Sphinx using the new -# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). -%: Makefile - @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/_build/html/.buildinfo b/docs/_build/html/.buildinfo deleted file mode 100644 index 52fb83ee..00000000 --- a/docs/_build/html/.buildinfo +++ /dev/null @@ -1,4 +0,0 @@ -# Sphinx build info version 1 -# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. -config: 2f3f51ea76c87fa1efbf5ede4657fbc2 -tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/docs/_build/html/genindex.html b/docs/_build/html/genindex.html deleted file mode 100644 index 23c7f332..00000000 --- a/docs/_build/html/genindex.html +++ /dev/null @@ -1,759 +0,0 @@ - - - - - - Index — sprit 1.4 documentation - - - - - - - - - - - - - - - -
-[genindex.html body: Sphinx-generated "Index — sprit 1.4 documentation" page with an alphabetical index (sections A | B | C | D | E | F | G | H | I | K | L | M | O | P | R | S | T | W | X); only stripped HTML markup survived extraction, so the body is summarized rather than reproduced]
diff --git a/docs/_build/html/index.html b/docs/_build/html/index.html
deleted file mode 100644
index b13233c7..00000000
--- a/docs/_build/html/index.html
+++ /dev/null
@@ -1,356 +0,0 @@
-[index.html body: landing page "Welcome to the documentation for SpRIT! — sprit 1.4 documentation" with a "Contents:" table of contents and an "Indices and tables" section; markup residue summarized rather than reproduced]
diff --git a/docs/_build/html/objects.inv b/docs/_build/html/objects.inv
deleted file mode 100644
index 219ac2cd7c303d79e14f6c3e173fa5587add17a9..0000000000000000000000000000000000000000
GIT binary patch
[compressed Sphinx inventory data omitted]
diff --git a/docs/_build/html/py-modindex.html b/docs/_build/html/py-modindex.html
deleted file mode 100644
index 48511240..00000000
--- a/docs/_build/html/py-modindex.html
+++ /dev/null
@@ -1,157 +0,0 @@
-[py-modindex.html body: "Python Module Index — sprit 1.4 documentation" page listing the modules sprit, sprit.sprit_cli, sprit.sprit_hvsr, sprit.sprit_jupyter_UI, sprit.sprit_plot, sprit.sprit_streamlit_ui, sprit.sprit_tkinter_ui, and sprit.sprit_utils; markup residue summarized rather than reproduced]
- - - - \ No newline at end of file diff --git a/docs/_build/html/searchindex.js b/docs/_build/html/searchindex.js deleted file mode 100644 index cf559053..00000000 --- a/docs/_build/html/searchindex.js +++ /dev/null @@ -1 +0,0 @@ -Search.setIndex({"alltitles": {"Contents:": [[0, null]], "Indices and tables": [[0, "indices-and-tables"]], "Submodules": [[1, "submodules"]], "Welcome to the documentation for SpRIT!": [[0, null]], "sprit package": [[1, null]], "sprit.sprit_cli module": [[2, null]], "sprit.sprit_hvsr module": [[3, null]], "sprit.sprit_jupyter_UI module": [[4, null]], "sprit.sprit_plot module": [[5, null]], "sprit.sprit_streamlit_ui module": [[6, null]], "sprit.sprit_tkinter_ui module": [[7, null]], "sprit.sprit_utils module": [[8, null]]}, "docnames": ["index", "sprit", "sprit.sprit_cli", "sprit.sprit_hvsr", "sprit.sprit_jupyter_UI", "sprit.sprit_plot", "sprit.sprit_streamlit_ui", "sprit.sprit_tkinter_ui", "sprit.sprit_utils"], "envversion": {"sphinx": 62, "sphinx.domains.c": 3, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 9, "sphinx.domains.index": 1, "sphinx.domains.javascript": 3, "sphinx.domains.math": 2, "sphinx.domains.python": 4, "sphinx.domains.rst": 2, "sphinx.domains.std": 2, "sphinx.ext.todo": 2, "sphinx.ext.viewcode": 1}, "filenames": ["index.rst", "sprit.rst", "sprit.sprit_cli.rst", "sprit.sprit_hvsr.rst", "sprit.sprit_jupyter_UI.rst", "sprit.sprit_plot.rst", "sprit.sprit_streamlit_ui.rst", "sprit.sprit_tkinter_ui.rst", "sprit.sprit_utils.rst"], "indexentries": {"assert_check() (in module sprit)": [[1, "sprit.assert_check", false]], "assert_check() (in module sprit.sprit_utils)": [[8, "sprit.sprit_utils.assert_check", false]], "batch (sprit.hvsrdata property)": [[1, "sprit.HVSRData.batch", false]], "batch (sprit.sprit_hvsr.hvsrdata property)": [[3, "sprit.sprit_hvsr.HVSRData.batch", false]], "batch_data_read() (in module sprit)": [[1, "sprit.batch_data_read", false]], "batch_data_read() (in module sprit.sprit_hvsr)": [[3, "sprit.sprit_hvsr.batch_data_read", false]], "calculate_azimuth() (in module sprit)": [[1, "sprit.calculate_azimuth", false]], "calculate_azimuth() (in module sprit.sprit_hvsr)": [[3, "sprit.sprit_hvsr.calculate_azimuth", false]], "catch_errors() (in module sprit)": [[1, "sprit.catch_errors", false]], "catch_errors() (in module sprit.sprit_tkinter_ui)": [[7, "sprit.sprit_tkinter_ui.catch_errors", false]], "check_gui_requirements() (in module sprit)": [[1, "sprit.check_gui_requirements", false]], "check_gui_requirements() (in module sprit.sprit_utils)": [[8, "sprit.sprit_utils.check_gui_requirements", false]], "check_if_default() (in module sprit.sprit_streamlit_ui)": [[6, "sprit.sprit_streamlit_ui.check_if_default", false]], "check_instance() (in module sprit.sprit_hvsr)": [[3, "sprit.sprit_hvsr.check_instance", false]], "check_mark() (in module sprit)": [[1, "sprit.check_mark", false]], "check_mark() (in module sprit.sprit_utils)": [[8, "sprit.sprit_utils.check_mark", false]], "check_peaks() (in module sprit)": [[1, "sprit.check_peaks", false]], "check_peaks() (in module sprit.sprit_hvsr)": [[3, "sprit.sprit_hvsr.check_peaks", false]], "check_tsteps() (in module sprit)": [[1, "sprit.check_tsteps", false]], "check_tsteps() (in module sprit.sprit_utils)": [[8, "sprit.sprit_utils.check_tsteps", false]], "check_xvalues() (in module sprit)": [[1, "sprit.check_xvalues", false]], "check_xvalues() (in module sprit.sprit_utils)": [[8, "sprit.sprit_utils.check_xvalues", false]], "checkifpath() (in module sprit)": [[1, 
"sprit.checkifpath", false]], "checkifpath() (in module sprit.sprit_utils)": [[8, "sprit.sprit_utils.checkifpath", false]], "copy() (sprit.hvsrbatch method)": [[1, "sprit.HVSRBatch.copy", false]], "copy() (sprit.hvsrdata method)": [[1, "sprit.HVSRData.copy", false]], "copy() (sprit.sprit_hvsr.hvsrbatch method)": [[3, "sprit.sprit_hvsr.HVSRBatch.copy", false]], "copy() (sprit.sprit_hvsr.hvsrdata method)": [[3, "sprit.sprit_hvsr.HVSRData.copy", false]], "create_jupyter_ui() (in module sprit)": [[1, "sprit.create_jupyter_ui", false]], "create_jupyter_ui() (in module sprit.sprit_jupyter_ui)": [[4, "sprit.sprit_jupyter_UI.create_jupyter_ui", false]], "create_menubar() (sprit.sprit_tkinter_ui.sprit_app method)": [[7, "sprit.sprit_tkinter_ui.SPRIT_App.create_menubar", false]], "create_tabs() (sprit.sprit_tkinter_ui.sprit_app method)": [[7, "sprit.sprit_tkinter_ui.SPRIT_App.create_tabs", false]], "datastream (sprit.hvsrdata property)": [[1, "sprit.HVSRData.datastream", false]], "datastream (sprit.sprit_hvsr.hvsrdata property)": [[3, "sprit.sprit_hvsr.HVSRData.datastream", false]], "export() (sprit.hvsrbatch method)": [[1, "sprit.HVSRBatch.export", false]], "export() (sprit.hvsrdata method)": [[1, "sprit.HVSRData.export", false]], "export() (sprit.sprit_hvsr.hvsrbatch method)": [[3, "sprit.sprit_hvsr.HVSRBatch.export", false]], "export() (sprit.sprit_hvsr.hvsrdata method)": [[3, "sprit.sprit_hvsr.HVSRData.export", false]], "export_data() (in module sprit)": [[1, "sprit.export_data", false]], "export_data() (in module sprit.sprit_hvsr)": [[3, "sprit.sprit_hvsr.export_data", false]], "export_report() (in module sprit.sprit_hvsr)": [[3, "sprit.sprit_hvsr.export_report", false]], "export_settings() (in module sprit)": [[1, "sprit.export_settings", false]], "export_settings() (in module sprit.sprit_hvsr)": [[3, "sprit.sprit_hvsr.export_settings", false]], "export_settings() (sprit.hvsrbatch method)": [[1, "sprit.HVSRBatch.export_settings", false]], "export_settings() (sprit.hvsrdata method)": [[1, "sprit.HVSRData.export_settings", false]], "export_settings() (sprit.sprit_hvsr.hvsrbatch method)": [[3, "sprit.sprit_hvsr.HVSRBatch.export_settings", false]], "export_settings() (sprit.sprit_hvsr.hvsrdata method)": [[3, "sprit.sprit_hvsr.HVSRData.export_settings", false]], "fetch_data() (in module sprit)": [[1, "sprit.fetch_data", false]], "fetch_data() (in module sprit.sprit_hvsr)": [[3, "sprit.sprit_hvsr.fetch_data", false]], "format_time() (in module sprit)": [[1, "sprit.format_time", false]], "format_time() (in module sprit.sprit_utils)": [[8, "sprit.sprit_utils.format_time", false]], "generate_ppsds() (in module sprit)": [[1, "sprit.generate_ppsds", false]], "generate_ppsds() (in module sprit.sprit_hvsr)": [[3, "sprit.sprit_hvsr.generate_ppsds", false]], "get_char() (in module sprit)": [[1, "sprit.get_char", false]], "get_char() (in module sprit.sprit_utils)": [[8, "sprit.sprit_utils.get_char", false]], "get_check_peaks_kwargs() (in module sprit.sprit_plot)": [[5, "sprit.sprit_plot.get_check_peaks_kwargs", false]], "get_default() (in module sprit.sprit_jupyter_ui)": [[4, "sprit.sprit_jupyter_UI.get_default", false]], "get_generate_ppsd_kwargs() (in module sprit.sprit_plot)": [[5, "sprit.sprit_plot.get_generate_ppsd_kwargs", false]], "get_get_report_kwargs() (in module sprit.sprit_plot)": [[5, "sprit.sprit_plot.get_get_report_kwargs", false]], "get_metadata() (in module sprit)": [[1, "sprit.get_metadata", false]], "get_metadata() (in module sprit.sprit_hvsr)": [[3, "sprit.sprit_hvsr.get_metadata", 
false]], "get_param_docstring() (in module sprit.sprit_cli)": [[2, "sprit.sprit_cli.get_param_docstring", false]], "get_process_hvsr_kwargs() (in module sprit.sprit_plot)": [[5, "sprit.sprit_plot.get_process_hvsr_kwargs", false]], "get_remove_noise_kwargs() (in module sprit.sprit_plot)": [[5, "sprit.sprit_plot.get_remove_noise_kwargs", false]], "get_remove_outlier_curve_kwargs() (in module sprit.sprit_plot)": [[5, "sprit.sprit_plot.get_remove_outlier_curve_kwargs", false]], "get_report() (in module sprit)": [[1, "sprit.get_report", false]], "get_report() (in module sprit.sprit_hvsr)": [[3, "sprit.sprit_hvsr.get_report", false]], "get_report() (sprit.hvsrbatch method)": [[1, "sprit.HVSRBatch.get_report", false]], "get_report() (sprit.hvsrdata method)": [[1, "sprit.HVSRData.get_report", false]], "get_report() (sprit.sprit_hvsr.hvsrbatch method)": [[3, "sprit.sprit_hvsr.HVSRBatch.get_report", false]], "get_report() (sprit.sprit_hvsr.hvsrdata method)": [[3, "sprit.sprit_hvsr.HVSRData.get_report", false]], "gui() (in module sprit)": [[1, "sprit.gui", false]], "gui() (in module sprit.sprit_hvsr)": [[3, "sprit.sprit_hvsr.gui", false]], "gui_test() (in module sprit.sprit_hvsr)": [[3, "sprit.sprit_hvsr.gui_test", false]], "has_required_channels() (in module sprit)": [[1, "sprit.has_required_channels", false]], "has_required_channels() (in module sprit.sprit_utils)": [[8, "sprit.sprit_utils.has_required_channels", false]], "hvsrbatch (class in sprit)": [[1, "sprit.HVSRBatch", false]], "hvsrbatch (class in sprit.sprit_hvsr)": [[3, "sprit.sprit_hvsr.HVSRBatch", false]], "hvsrdata (class in sprit)": [[1, "sprit.HVSRData", false]], "hvsrdata (class in sprit.sprit_hvsr)": [[3, "sprit.sprit_hvsr.HVSRData", false]], "import_data() (in module sprit)": [[1, "sprit.import_data", false]], "import_data() (in module sprit.sprit_hvsr)": [[3, "sprit.sprit_hvsr.import_data", false]], "import_settings() (in module sprit)": [[1, "sprit.import_settings", false]], "import_settings() (in module sprit.sprit_hvsr)": [[3, "sprit.sprit_hvsr.import_settings", false]], "input_params() (in module sprit)": [[1, "sprit.input_params", false]], "input_params() (in module sprit.sprit_hvsr)": [[3, "sprit.sprit_hvsr.input_params", false]], "items() (sprit.hvsrbatch method)": [[1, "sprit.HVSRBatch.items", false]], "items() (sprit.hvsrdata method)": [[1, "sprit.HVSRData.items", false]], "items() (sprit.sprit_hvsr.hvsrbatch method)": [[3, "sprit.sprit_hvsr.HVSRBatch.items", false]], "items() (sprit.sprit_hvsr.hvsrdata method)": [[3, "sprit.sprit_hvsr.HVSRData.items", false]], "keys() (sprit.hvsrbatch method)": [[1, "sprit.HVSRBatch.keys", false]], "keys() (sprit.hvsrdata method)": [[1, "sprit.HVSRData.keys", false]], "keys() (sprit.sprit_hvsr.hvsrbatch method)": [[3, "sprit.sprit_hvsr.HVSRBatch.keys", false]], "keys() (sprit.sprit_hvsr.hvsrdata method)": [[3, "sprit.sprit_hvsr.HVSRData.keys", false]], "log_errormsg() (sprit.sprit_tkinter_ui.sprit_app method)": [[7, "sprit.sprit_tkinter_ui.SPRIT_App.log_errorMsg", false]], "main() (in module sprit.sprit_cli)": [[2, "sprit.sprit_cli.main", false]], "make_it_classy() (in module sprit)": [[1, "sprit.make_it_classy", false]], "make_it_classy() (in module sprit.sprit_utils)": [[8, "sprit.sprit_utils.make_it_classy", false]], "manual_label_update() (sprit.sprit_tkinter_ui.sprit_app method)": [[7, "sprit.sprit_tkinter_ui.SPRIT_App.manual_label_update", false]], "module": [[1, "module-sprit", false], [2, "module-sprit.sprit_cli", false], [3, "module-sprit.sprit_hvsr", false], [4, 
"module-sprit.sprit_jupyter_UI", false], [5, "module-sprit.sprit_plot", false], [6, "module-sprit.sprit_streamlit_ui", false], [7, "module-sprit.sprit_tkinter_ui", false], [8, "module-sprit.sprit_utils", false]], "on_closing() (in module sprit.sprit_tkinter_ui)": [[7, "sprit.sprit_tkinter_ui.on_closing", false]], "on_file_upload() (in module sprit.sprit_streamlit_ui)": [[6, "sprit.sprit_streamlit_ui.on_file_upload", false]], "on_run_data() (in module sprit.sprit_streamlit_ui)": [[6, "sprit.sprit_streamlit_ui.on_run_data", false]], "params (sprit.hvsrdata property)": [[1, "sprit.HVSRData.params", false]], "params (sprit.sprit_hvsr.hvsrdata property)": [[3, "sprit.sprit_hvsr.HVSRData.params", false]], "parse_comp_plot_list() (in module sprit.sprit_plot)": [[5, "sprit.sprit_plot.parse_comp_plot_list", false]], "parse_hv_plot_list() (in module sprit.sprit_plot)": [[5, "sprit.sprit_plot.parse_hv_plot_list", false]], "parse_plot_string() (in module sprit)": [[1, "sprit.parse_plot_string", false]], "parse_plot_string() (in module sprit.sprit_plot)": [[5, "sprit.sprit_plot.parse_plot_string", false]], "parse_spec_plot_list() (in module sprit.sprit_plot)": [[5, "sprit.sprit_plot.parse_spec_plot_list", false]], "plot() (sprit.hvsrbatch method)": [[1, "sprit.HVSRBatch.plot", false]], "plot() (sprit.hvsrdata method)": [[1, "sprit.HVSRData.plot", false]], "plot() (sprit.sprit_hvsr.hvsrbatch method)": [[3, "sprit.sprit_hvsr.HVSRBatch.plot", false]], "plot() (sprit.sprit_hvsr.hvsrdata method)": [[3, "sprit.sprit_hvsr.HVSRData.plot", false]], "plot_azimuth() (in module sprit)": [[1, "sprit.plot_azimuth", false]], "plot_azimuth() (in module sprit.sprit_hvsr)": [[3, "sprit.sprit_hvsr.plot_azimuth", false]], "plot_hvsr() (in module sprit)": [[1, "sprit.plot_hvsr", false]], "plot_hvsr() (in module sprit.sprit_hvsr)": [[3, "sprit.sprit_hvsr.plot_hvsr", false]], "plot_outlier_curves() (in module sprit)": [[1, "sprit.plot_outlier_curves", false]], "plot_outlier_curves() (in module sprit.sprit_plot)": [[5, "sprit.sprit_plot.plot_outlier_curves", false]], "plot_preview() (in module sprit)": [[1, "sprit.plot_preview", false]], "plot_preview() (in module sprit.sprit_plot)": [[5, "sprit.sprit_plot.plot_preview", false]], "plot_results() (in module sprit)": [[1, "sprit.plot_results", false]], "plot_results() (in module sprit.sprit_plot)": [[5, "sprit.sprit_plot.plot_results", false]], "plot_stream() (in module sprit.sprit_hvsr)": [[3, "sprit.sprit_hvsr.plot_stream", false]], "ppsds (sprit.hvsrdata property)": [[1, "sprit.HVSRData.ppsds", false]], "ppsds (sprit.sprit_hvsr.hvsrdata property)": [[3, "sprit.sprit_hvsr.HVSRData.ppsds", false]], "ppsds_obspy (sprit.hvsrdata property)": [[1, "sprit.HVSRData.ppsds_obspy", false]], "ppsds_obspy (sprit.sprit_hvsr.hvsrdata property)": [[3, "sprit.sprit_hvsr.HVSRData.ppsds_obspy", false]], "print_param() (in module sprit.sprit_streamlit_ui)": [[6, "sprit.sprit_streamlit_ui.print_param", false]], "process_data() (in module sprit.sprit_plot)": [[5, "sprit.sprit_plot.process_data", false]], "process_hvsr() (in module sprit)": [[1, "sprit.process_hvsr", false]], "process_hvsr() (in module sprit.sprit_hvsr)": [[3, "sprit.sprit_hvsr.process_hvsr", false]], "read_data() (in module sprit.sprit_plot)": [[5, "sprit.sprit_plot.read_data", false]], "read_from_rs() (in module sprit)": [[1, "sprit.read_from_RS", false]], "read_from_rs() (in module sprit.sprit_utils)": [[8, "sprit.sprit_utils.read_from_RS", false]], "read_tromino_files() (in module sprit)": [[1, "sprit.read_tromino_files", 
false]], "read_tromino_files() (in module sprit.sprit_hvsr)": [[3, "sprit.sprit_hvsr.read_tromino_files", false]], "reboot_app() (in module sprit.sprit_tkinter_ui)": [[7, "sprit.sprit_tkinter_ui.reboot_app", false]], "remove_noise() (in module sprit)": [[1, "sprit.remove_noise", false]], "remove_noise() (in module sprit.sprit_hvsr)": [[3, "sprit.sprit_hvsr.remove_noise", false]], "remove_outlier_curves() (in module sprit)": [[1, "sprit.remove_outlier_curves", false]], "remove_outlier_curves() (in module sprit.sprit_hvsr)": [[3, "sprit.sprit_hvsr.remove_outlier_curves", false]], "report() (sprit.hvsrbatch method)": [[1, "sprit.HVSRBatch.report", false]], "report() (sprit.hvsrdata method)": [[1, "sprit.HVSRData.report", false]], "report() (sprit.sprit_hvsr.hvsrbatch method)": [[3, "sprit.sprit_hvsr.HVSRBatch.report", false]], "report() (sprit.sprit_hvsr.hvsrdata method)": [[3, "sprit.sprit_hvsr.HVSRData.report", false]], "run() (in module sprit)": [[1, "sprit.run", false]], "run() (in module sprit.sprit_hvsr)": [[3, "sprit.sprit_hvsr.run", false]], "setup_session_state() (in module sprit.sprit_streamlit_ui)": [[6, "sprit.sprit_streamlit_ui.setup_session_state", false]], "sprit": [[1, "module-sprit", false]], "sprit.sprit_cli": [[2, "module-sprit.sprit_cli", false]], "sprit.sprit_hvsr": [[3, "module-sprit.sprit_hvsr", false]], "sprit.sprit_jupyter_ui": [[4, "module-sprit.sprit_jupyter_UI", false]], "sprit.sprit_plot": [[5, "module-sprit.sprit_plot", false]], "sprit.sprit_streamlit_ui": [[6, "module-sprit.sprit_streamlit_ui", false]], "sprit.sprit_tkinter_ui": [[7, "module-sprit.sprit_tkinter_ui", false]], "sprit.sprit_utils": [[8, "module-sprit.sprit_utils", false]], "sprit_app (class in sprit.sprit_tkinter_ui)": [[7, "sprit.sprit_tkinter_ui.SPRIT_App", false]], "test_function() (in module sprit.sprit_hvsr)": [[3, "sprit.sprit_hvsr.test_function", false]], "text_change() (in module sprit.sprit_streamlit_ui)": [[6, "sprit.sprit_streamlit_ui.text_change", false]], "time_it() (in module sprit)": [[1, "sprit.time_it", false]], "time_it() (in module sprit.sprit_utils)": [[8, "sprit.sprit_utils.time_it", false]], "write_to_info_tab() (in module sprit.sprit_streamlit_ui)": [[6, "sprit.sprit_streamlit_ui.write_to_info_tab", false]], "x_mark() (in module sprit)": [[1, "sprit.x_mark", false]], "x_mark() (in module sprit.sprit_utils)": [[8, "sprit.sprit_utils.x_mark", false]]}, "objects": {"": [[1, 0, 0, "-", "sprit"]], "sprit": [[1, 1, 1, "", "HVSRBatch"], [1, 1, 1, "", "HVSRData"], [1, 4, 1, "", "assert_check"], [1, 4, 1, "", "batch_data_read"], [1, 4, 1, "", "calculate_azimuth"], [1, 4, 1, "", "catch_errors"], [1, 4, 1, "", "check_gui_requirements"], [1, 4, 1, "", "check_mark"], [1, 4, 1, "", "check_peaks"], [1, 4, 1, "", "check_tsteps"], [1, 4, 1, "", "check_xvalues"], [1, 4, 1, "", "checkifpath"], [1, 4, 1, "", "create_jupyter_ui"], [1, 4, 1, "", "export_data"], [1, 4, 1, "", "export_settings"], [1, 4, 1, "", "fetch_data"], [1, 4, 1, "", "format_time"], [1, 4, 1, "", "generate_ppsds"], [1, 4, 1, "", "get_char"], [1, 4, 1, "", "get_metadata"], [1, 4, 1, "", "get_report"], [1, 4, 1, "", "gui"], [1, 4, 1, "", "has_required_channels"], [1, 4, 1, "", "import_data"], [1, 4, 1, "", "import_settings"], [1, 4, 1, "", "input_params"], [1, 4, 1, "", "make_it_classy"], [1, 4, 1, "", "parse_plot_string"], [1, 4, 1, "", "plot_azimuth"], [1, 4, 1, "", "plot_hvsr"], [1, 4, 1, "", "plot_outlier_curves"], [1, 4, 1, "", "plot_preview"], [1, 4, 1, "", "plot_results"], [1, 4, 1, "", "process_hvsr"], [1, 4, 1, "", 
"read_from_RS"], [1, 4, 1, "", "read_tromino_files"], [1, 4, 1, "", "remove_noise"], [1, 4, 1, "", "remove_outlier_curves"], [1, 4, 1, "", "run"], [2, 0, 0, "-", "sprit_cli"], [3, 0, 0, "-", "sprit_hvsr"], [4, 0, 0, "-", "sprit_jupyter_UI"], [5, 0, 0, "-", "sprit_plot"], [6, 0, 0, "-", "sprit_streamlit_ui"], [7, 0, 0, "-", "sprit_tkinter_ui"], [8, 0, 0, "-", "sprit_utils"], [1, 4, 1, "", "time_it"], [1, 4, 1, "", "x_mark"]], "sprit.HVSRBatch": [[1, 2, 1, "", "copy"], [1, 2, 1, "", "export"], [1, 2, 1, "", "export_settings"], [1, 2, 1, "", "get_report"], [1, 2, 1, "", "items"], [1, 2, 1, "", "keys"], [1, 2, 1, "", "plot"], [1, 2, 1, "", "report"]], "sprit.HVSRData": [[1, 3, 1, "", "batch"], [1, 2, 1, "", "copy"], [1, 3, 1, "", "datastream"], [1, 2, 1, "", "export"], [1, 2, 1, "", "export_settings"], [1, 2, 1, "", "get_report"], [1, 2, 1, "", "items"], [1, 2, 1, "", "keys"], [1, 3, 1, "", "params"], [1, 2, 1, "", "plot"], [1, 3, 1, "", "ppsds"], [1, 3, 1, "", "ppsds_obspy"], [1, 2, 1, "", "report"]], "sprit.sprit_cli": [[2, 4, 1, "", "get_param_docstring"], [2, 4, 1, "", "main"]], "sprit.sprit_hvsr": [[3, 1, 1, "", "HVSRBatch"], [3, 1, 1, "", "HVSRData"], [3, 4, 1, "", "batch_data_read"], [3, 4, 1, "", "calculate_azimuth"], [3, 4, 1, "", "check_instance"], [3, 4, 1, "", "check_peaks"], [3, 4, 1, "", "export_data"], [3, 4, 1, "", "export_report"], [3, 4, 1, "", "export_settings"], [3, 4, 1, "", "fetch_data"], [3, 4, 1, "", "generate_ppsds"], [3, 4, 1, "", "get_metadata"], [3, 4, 1, "", "get_report"], [3, 4, 1, "", "gui"], [3, 4, 1, "", "gui_test"], [3, 4, 1, "", "import_data"], [3, 4, 1, "", "import_settings"], [3, 4, 1, "", "input_params"], [3, 4, 1, "", "plot_azimuth"], [3, 4, 1, "", "plot_hvsr"], [3, 4, 1, "", "plot_stream"], [3, 4, 1, "", "process_hvsr"], [3, 4, 1, "", "read_tromino_files"], [3, 4, 1, "", "remove_noise"], [3, 4, 1, "", "remove_outlier_curves"], [3, 4, 1, "", "run"], [3, 4, 1, "", "test_function"]], "sprit.sprit_hvsr.HVSRBatch": [[3, 2, 1, "", "copy"], [3, 2, 1, "", "export"], [3, 2, 1, "", "export_settings"], [3, 2, 1, "", "get_report"], [3, 2, 1, "", "items"], [3, 2, 1, "", "keys"], [3, 2, 1, "", "plot"], [3, 2, 1, "", "report"]], "sprit.sprit_hvsr.HVSRData": [[3, 3, 1, "", "batch"], [3, 2, 1, "", "copy"], [3, 3, 1, "", "datastream"], [3, 2, 1, "", "export"], [3, 2, 1, "", "export_settings"], [3, 2, 1, "", "get_report"], [3, 2, 1, "", "items"], [3, 2, 1, "", "keys"], [3, 3, 1, "", "params"], [3, 2, 1, "", "plot"], [3, 3, 1, "", "ppsds"], [3, 3, 1, "", "ppsds_obspy"], [3, 2, 1, "", "report"]], "sprit.sprit_jupyter_UI": [[4, 4, 1, "", "create_jupyter_ui"], [4, 4, 1, "", "get_default"]], "sprit.sprit_plot": [[5, 4, 1, "", "get_check_peaks_kwargs"], [5, 4, 1, "", "get_generate_ppsd_kwargs"], [5, 4, 1, "", "get_get_report_kwargs"], [5, 4, 1, "", "get_process_hvsr_kwargs"], [5, 4, 1, "", "get_remove_noise_kwargs"], [5, 4, 1, "", "get_remove_outlier_curve_kwargs"], [5, 4, 1, "", "parse_comp_plot_list"], [5, 4, 1, "", "parse_hv_plot_list"], [5, 4, 1, "", "parse_plot_string"], [5, 4, 1, "", "parse_spec_plot_list"], [5, 4, 1, "", "plot_outlier_curves"], [5, 4, 1, "", "plot_preview"], [5, 4, 1, "", "plot_results"], [5, 4, 1, "", "process_data"], [5, 4, 1, "", "read_data"]], "sprit.sprit_streamlit_ui": [[6, 4, 1, "", "check_if_default"], [6, 4, 1, "", "on_file_upload"], [6, 4, 1, "", "on_run_data"], [6, 4, 1, "", "print_param"], [6, 4, 1, "", "setup_session_state"], [6, 4, 1, "", "text_change"], [6, 4, 1, "", "write_to_info_tab"]], "sprit.sprit_tkinter_ui": [[7, 1, 1, "", 
"SPRIT_App"], [7, 4, 1, "", "catch_errors"], [7, 4, 1, "", "on_closing"], [7, 4, 1, "", "reboot_app"]], "sprit.sprit_tkinter_ui.SPRIT_App": [[7, 2, 1, "", "create_menubar"], [7, 2, 1, "", "create_tabs"], [7, 2, 1, "", "log_errorMsg"], [7, 2, 1, "", "manual_label_update"]], "sprit.sprit_utils": [[8, 4, 1, "", "assert_check"], [8, 4, 1, "", "check_gui_requirements"], [8, 4, 1, "", "check_mark"], [8, 4, 1, "", "check_tsteps"], [8, 4, 1, "", "check_xvalues"], [8, 4, 1, "", "checkifpath"], [8, 4, 1, "", "format_time"], [8, 4, 1, "", "get_char"], [8, 4, 1, "", "has_required_channels"], [8, 4, 1, "", "make_it_classy"], [8, 4, 1, "", "read_from_RS"], [8, 4, 1, "", "time_it"], [8, 4, 1, "", "x_mark"]]}, "objnames": {"0": ["py", "module", "Python module"], "1": ["py", "class", "Python class"], "2": ["py", "method", "Python method"], "3": ["py", "property", "Python property"], "4": ["py", "function", "Python function"]}, "objtypes": {"0": "py:module", "1": "py:class", "2": "py:method", "3": "py:property", "4": "py:function"}, "terms": {"": [1, 2, 3], "0": [1, 3, 5, 8], "00": [1, 3, 8], "000000z": [1, 3], "03125": [1, 3], "05": [1, 3], "1": [1, 3, 8], "10": [1, 3], "100": [1, 3], "1000": [1, 3], "1012122": [1, 3], "125": [1, 3], "128": [1, 3], "13": [1, 3], "16": [1, 3], "2": [1, 3], "2004": [1, 3], "2023": [1, 8], "2024": [1, 3], "2290526": [1, 3], "23": [1, 3], "24576": [1, 3], "3": [1, 3], "30": [1, 3], "30t00": [1, 3], "30t23": [1, 3], "33": [1, 3], "3600": [1, 3], "3g": [1, 3], "4": [1, 3], "40": [1, 3], "400": [1, 3], "4326": [1, 3], "5": [1, 3, 8], "51": [1, 3], "59": [1, 3], "6": [1, 3, 8], "75": 3, "755": [1, 3], "8": [1, 3], "80": [1, 3], "88": [1, 3], "98": [1, 3, 5], "98th": [1, 3], "99": [1, 3], "995": [1, 3], "999999": [1, 3], "999999z": [1, 3], "A": [1, 3], "By": [1, 3], "For": [1, 2, 3], "If": [1, 3, 8], "In": [1, 2, 3], "It": [1, 3], "No": [1, 3], "Not": [1, 3], "The": [1, 2, 3, 8], "These": [1, 3], "_": [1, 3], "_datapath_": [1, 3], "_description_": [1, 3], "_read_rs_metadata": [1, 3], "_t": [1, 8], "_type_": [1, 3], "a4": [1, 3], "abber": [1, 3], "abl": [1, 3, 8], "about": [1, 3], "abov": [1, 3], "accept": [1, 3], "access": [1, 3], "accessor": [1, 3], "account": [1, 3], "acq_dat": [1, 3], "acquir": [1, 3], "action": 7, "actual": [1, 3], "ad": [1, 3], "add": [1, 3], "addit": [1, 3], "addition": 3, "adjust": 3, "affect": [1, 3], "after": [1, 3], "again": [1, 3], "against": [1, 3], "algorithm": [1, 3], "all": [1, 2, 3, 7, 8], "allow": [1, 3], "alreadi": [1, 3], "also": [1, 2, 3], "am": [1, 3, 8], "amateur": [1, 3], "ambient": [1, 3], "amend": [1, 3], "american": [1, 3], "amplitud": [1, 3], "an": [1, 3], "analysi": [1, 3], "analyz": [1, 3], "angl": [1, 3], "angular": [1, 3], "ani": [1, 2, 3, 7], "ann": [1, 3, 5], "annot": [1, 3], "anoth": [1, 8], "antitrigg": [1, 3], "anyth": [1, 3, 8], "api": [1, 3], "app": 7, "appear": [1, 3], "append": [1, 3], "appli": [1, 3], "appropri": [1, 3], "ar": [1, 2, 3, 8], "archiv": [1, 8], "arg": [1, 3], "argument": [1, 2, 3], "arithmet": [1, 3], "arrang": [1, 3], "asid": [1, 3], "assert_check": [0, 1, 8], "assign": [1, 3], "assum": [1, 8], "assumpt": [1, 3], "attribut": [1, 3], "auto": [1, 3], "autogen": [1, 3], "automat": [1, 3], "avail": [1, 3], "averag": [1, 3], "ax": [1, 3], "axi": [1, 3], "az": [1, 3, 8], "azimuth": [1, 3, 5], "azimuth_angl": [1, 3], "azimuth_calcul": [1, 3], "azimuth_typ": [1, 3], "azimuth_unit": [1, 3], "azimuthal_ppsd": [1, 3], "back": [1, 3], "bandwidth": [1, 3], "base": [1, 3, 7], "basic": [1, 3], "batch": [0, 1, 3], 
"batch_data": [1, 3], "batch_data_read": [0, 1, 3], "batch_param": [1, 3], "batch_typ": [1, 3], "been": [1, 3], "befor": [1, 3, 7], "being": [1, 3], "below": [1, 3], "besid": [1, 3], "best": [1, 3], "between": [1, 3], "bin": [1, 3], "bool": [1, 3], "both": [1, 3], "bracket": [1, 3], "broader": [1, 3], "browser": [1, 3], "buffer": [1, 3], "bulk": [1, 3], "button": 5, "c": [1, 3, 5], "calcul": [1, 3], "calculate_azimuth": [0, 1, 3], "call": [1, 3, 7, 8], "can": [1, 2, 3, 8], "cannot": [1, 3], "carri": [1, 3], "case": [1, 2, 3], "catch_error": [0, 1, 7], "caught": [1, 3], "cdt": [1, 8], "chang": [1, 3], "channel": [1, 3], "charact": [1, 8], "chart": [1, 3], "check": [1, 3, 8], "check_gui_requir": [0, 1, 8], "check_if_default": [0, 1, 6], "check_inst": [0, 1, 3], "check_mark": [0, 1, 8], "check_peak": [0, 1, 3], "check_tstep": [0, 1, 8], "check_xvalu": [0, 1, 8], "checkifpath": [0, 1, 8], "chosen": [1, 3], "circular": [1, 3], "class": [1, 3, 7], "cleanup": 7, "clear": [1, 3], "clear_fig": [1, 3], "click": [1, 3], "clip": 3, "close": [1, 3], "close_fig": [1, 3], "co": [1, 3], "code": [1, 3], "column": [1, 3], "combin": [1, 3], "command": [2, 3], "commonli": [1, 3], "comp": [1, 3], "comp_plot_list": 5, "compon": [1, 3], "componen": 3, "comput": [1, 8], "cond": [1, 8], "constant": [1, 3], "contain": [1, 3, 7], "convert": [1, 3, 8], "cooldown": [1, 3], "cooldown_tim": [1, 3], "coordin": [1, 3], "copi": [0, 1, 3], "core": [1, 3], "corner": [1, 3], "correct": [1, 3], "correctli": [1, 2, 3, 8], "correspond": 2, "cr": [1, 3], "creat": [1, 3, 4, 7], "create_jupyter_ui": [0, 1, 4], "create_menubar": [1, 7], "create_tab": [1, 7], "creation": [1, 3], "criteria": [1, 3], "cst": [1, 8], "csv": [1, 2, 3], "csv_handl": [1, 3], "current": [1, 3, 7], "curv": [1, 3], "customiz": 3, "cut": [1, 3], "cutoff": [1, 3], "d": [1, 3], "dai": [1, 3], "data": [1, 2, 3, 7, 8], "data_export_format": [1, 3], "data_export_path": [1, 3], "data_format": [1, 3], "databas": [1, 3], "datafram": [1, 3], "dataset": [1, 3], "datastream": [0, 1, 3], "date": [1, 3, 8], "datetim": [1, 3, 8], "dd": [1, 3], "decim": [1, 3], "decod": [1, 8], "deep": [1, 3], "deepcopi": [1, 3], "default": [1, 3, 8], "defin": [1, 3], "defualt": [1, 3], "deg": [1, 3], "degre": [1, 3], "deliv": [1, 3], "densiti": [1, 3], "depend": [1, 3], "depth": [1, 3], "descript": [1, 3], "design": [1, 3], "dest": [1, 8], "detail": [1, 3], "determin": [1, 3], "detrend": [1, 3], "detrend_ord": [1, 3], "deviat": [1, 3], "dict": [1, 3], "dict_item": [1, 3], "dict_kei": [1, 3], "dictionari": [1, 3], "differ": [1, 3], "diffus": [1, 3], "digit": [1, 3], "dir": [1, 3], "direct": [1, 3], "directli": [1, 3], "directori": [1, 3], "disk": [1, 3], "displai": [1, 3, 8], "display": [1, 8], "divid": [1, 3], "do": [1, 3], "doc": [1, 3], "document": [1, 2, 3], "doe": [1, 3, 7], "done": [1, 3, 7], "dot": [1, 3], "due": [1, 3], "dure": [1, 3], "e": [1, 2, 3, 8], "e0": [1, 3], "e1": [1, 3], "each": [1, 3], "eas": [1, 3], "easili": [1, 3], "east": [1, 3], "eastern": [1, 3], "eh": [1, 3], "ehn": [1, 3], "ehr": [1, 3], "ehz": [1, 3], "eie": [1, 3], "eie_": [1, 3], "eie_n": [1, 3], "eie_z": [1, 3], "either": [1, 3], "elaps": [1, 8], "element": [1, 3], "elev": [1, 3], "elev_unit": [1, 3], "els": [1, 3], "en": [1, 3], "encod": [1, 8], "end": [1, 3], "endtim": [1, 3], "energi": [1, 3], "engin": [1, 3], "enhanc": [1, 3], "ensur": [1, 3], "entir": [1, 3], "entri": [1, 3, 8], "epsg": [1, 3], "equal": [1, 3], "equival": [1, 3], "error": [1, 3, 8], "error_messag": [1, 8], "essenti": [1, 3], "etc": [1, 3, 
7], "even": [1, 3], "eventu": [1, 3], "everyth": [1, 3], "exampl": [1, 3], "except": [1, 3], "exclud": [1, 3], "excpet": [1, 3], "exist": [1, 3], "expect": [1, 3], "experiment": [1, 3], "explicit": [1, 3], "export": [0, 1, 3], "export_data": [0, 1, 3], "export_report": [0, 1, 3], "export_set": [0, 1, 3], "export_settings_path": [1, 3], "export_settings_typ": [1, 3], "ext": [1, 3], "extens": [1, 3], "extra": [1, 3], "f": [1, 3], "f_smooth_width": [1, 3], "fail": [1, 3], "fairli": [1, 3], "fals": [1, 3, 5, 6, 8], "fast": [1, 3], "feb": [1, 3], "fetch": [1, 3], "fetch_data": [0, 1, 3], "field": [1, 3], "fig": [1, 3], "figur": [1, 3], "file": [1, 3], "filelist": [1, 3], "filenam": [1, 3], "filepath": [1, 3, 8], "filter": [1, 3], "final": [1, 3], "find": [1, 3], "first": [1, 3], "fit": [1, 3], "float": [1, 3], "folder": [1, 3], "follow": [1, 3], "form": [1, 3], "format": [1, 2, 3, 8], "format_tim": [0, 1, 8], "fr": [1, 3], "freq_smooth": [1, 3], "frequenc": [1, 3], "frequnci": [1, 3], "from": [1, 2, 3, 8], "from_roc": [1, 5], "from_user_input": [1, 3], "full": [1, 3], "func": [1, 2, 4, 7], "function": [1, 2, 3, 4, 7, 8], "futur": [1, 3], "g": [1, 2, 3, 8], "gener": [1, 3], "generate_ppsd": [0, 1, 3], "geometr": [1, 3], "geopsi": [1, 3], "get": [1, 2, 3], "get_char": [0, 1, 8], "get_check_peaks_kwarg": [0, 1, 5], "get_default": [0, 1, 4], "get_generate_ppsd_kwarg": [0, 1, 5], "get_get_report_kwarg": [0, 1, 5], "get_metadata": [0, 1, 3], "get_param_docstr": [0, 1, 2], "get_process_hvsr_kwarg": [0, 1, 5], "get_remove_noise_kwarg": [0, 1, 5], "get_remove_outlier_curve_kwarg": [0, 1, 5], "get_report": [0, 1, 3], "gone": [1, 3], "graphic": [1, 3, 7], "greater": [1, 3], "grid": [1, 3], "gui": [0, 1, 3], "gui_test": [0, 1, 3], "h": [1, 3], "h1": [1, 3], "h2": [1, 3], "ha": [1, 3], "handl": [1, 3], "happen": [1, 3], "has_required_channel": [0, 1, 8], "have": [1, 3], "he": [1, 3], "help": [1, 3], "here": [1, 2, 3], "hh": [1, 3], "high": [1, 3], "highest": [1, 3], "hn": [1, 3], "home": [1, 3], "horizont": [1, 3], "horizontal_method": [1, 3], "hostnam": [1, 8], "how": [1, 3], "html": [1, 3], "html_report": [1, 3], "http": [1, 3], "hv": [1, 3, 5], "hv_data": [1, 5], "hv_plot": [1, 3], "hvsr": [1, 3, 5], "hvsr_band": [1, 3], "hvsr_data": [1, 3, 5, 8], "hvsr_export_path": [1, 3], "hvsr_id": [1, 3], "hvsr_out": [1, 3], "hvsr_plot_list": 5, "hvsr_result": [1, 3], "hvsrbatch": [0, 1, 3], "hvsrbatchvari": [1, 3], "hvsrdata": [0, 1, 3], "hz": [1, 3], "i": [1, 2, 3, 8], "iana": [1, 3], "id_prefix": [1, 3], "identifi": [1, 3], "ie": [1, 3], "imag": 3, "immedi": [1, 3], "import": [1, 3], "import_data": [0, 1, 3], "import_filepath": [1, 3], "import_set": [0, 1, 3], "in_char": [1, 8], "includ": [1, 3, 8], "include_loc": [1, 3], "inclus": [1, 3], "incolor": [1, 8], "indep": [1, 3], "index": [0, 1, 3], "indexerror": [1, 3], "indic": [1, 3], "indivdu": [1, 3], "individu": [1, 2, 3], "infer": [1, 8], "info": [1, 3], "info_tab": 6, "inform": [1, 2, 3], "init": 3, "input": [1, 2, 3, 8], "input_cr": [1, 3], "input_data": [1, 2, 3, 8], "input_param": [0, 1, 2, 3], "input_stream": [1, 3], "inputdt": [1, 8], "insert": [1, 3], "instanc": [1, 3], "instead": [1, 3], "instrument": [1, 3], "int": [1, 3, 8], "integ": [1, 3], "interepret": [1, 3], "interest": [1, 3], "interfac": [1, 3, 7], "intermin": [1, 8], "interpol": [1, 3], "interpolate_azimuth": [1, 3], "interpret": [1, 3], "interv": [1, 3], "inventori": [1, 3], "io": [1, 3], "item": [0, 1, 3], "iter": [1, 3], "its": [1, 3], "itself": [1, 3], "json": [1, 3], "jupyt": [1, 3, 4], 
"just": [1, 2, 3, 8], "keep": [1, 3], "kei": [0, 1, 3, 6], "keyword": [1, 2, 3], "kind": [1, 3], "konno": [1, 3], "konno_ohmachi_smooth": [1, 3], "konnoohmachismooth": [1, 3], "kwarg": [1, 3], "kwargskei": [1, 3], "kwargsvalu": [1, 3], "larger": [1, 3], "last": [1, 3, 8], "later": [1, 3], "latest": [1, 3], "latitut": [1, 3], "least": [1, 3], "left": [1, 3], "legend": [1, 3], "length": [1, 3, 8], "librari": [1, 3], "lightli": [1, 3], "like": [1, 3, 7], "limit": [1, 3], "line": [1, 2, 3], "link": [1, 3], "list": [1, 2, 3], "list_of_tz_database_time_zon": [1, 3], "lite": [1, 3], "littl": [1, 3], "load": [1, 3], "loc": [1, 3], "local": [1, 8], "locat": [1, 3], "log_errormsg": [1, 7], "logmsg": 7, "long": [1, 3], "longer": [1, 3], "longitud": [1, 3], "lot": [1, 3], "low": [1, 3], "lower": [1, 3], "lta": [1, 3], "m": [1, 3], "machin": [1, 3], "mai": [1, 2, 3], "main": [0, 1, 2, 3], "make": [1, 3, 8], "make_it_classi": [0, 1, 8], "manipul": [1, 3], "manual": [1, 3], "manual_label_upd": [1, 7], "map": [1, 3], "mark": [1, 8], "master": 7, "match": [1, 3], "matplotlib": [1, 3], "max": [1, 3], "maximum": [1, 3], "mean": [1, 3], "measruement": [1, 3], "measur": [1, 3], "median": [1, 3], "metadata": [1, 3], "metapath": [1, 3], "meter": [1, 3], "method": [1, 3, 7, 8], "micro": [1, 3], "min": [1, 3], "min_win_s": [1, 3], "minimum": [1, 3], "minumum": [1, 3], "mm": [1, 3], "modifi": [1, 3], "modul": [0, 1], "monospac": [1, 3], "more": [1, 2, 3], "most": [1, 3], "mpl": [1, 3], "mseed": [1, 3], "much": [1, 3], "mult": [1, 3], "multi": [1, 3], "multipl": [1, 2, 3], "must": [1, 3, 7], "myshak": [1, 8], "n": [1, 3], "name": [1, 3], "nearing": [1, 3], "necessari": [1, 3], "need": [1, 2, 3], "nest": [1, 3], "network": [1, 3], "new": [1, 3], "nois": [1, 3], "noise_perc": [1, 3], "noise_remov": [1, 3], "noisi": [1, 3], "none": [1, 3, 5, 6, 8], "normal": [1, 3], "north": [1, 3], "note": [1, 3, 7], "notebook": 4, "noth": [1, 3], "now": [1, 3, 8], "number": [1, 3], "numer": [1, 3], "obejct": [1, 3], "obj": [1, 3, 8], "object": [1, 3, 7, 8], "obpsi": 3, "obspi": [1, 3], "obtain": [1, 3], "occur": [1, 3], "off": [1, 3], "offset": [1, 3, 8], "often": [1, 3], "ohmachi": [1, 3], "on_clos": [0, 1, 7], "on_file_upload": [0, 1, 6], "on_run_data": [0, 1, 6], "one": [1, 3], "onli": [1, 2, 3], "open": [1, 3], "oper": [1, 3], "opt": [1, 8], "option": [1, 3], "order": [1, 3], "org": [1, 3], "origin": [1, 3], "other": [1, 2, 3, 8], "other_kwarg": [1, 3], "otherwis": [1, 3], "ouput": [1, 3], "out": [1, 3], "outlier": [1, 3], "outlier_curve_rmse_percentil": [1, 3], "outlier_curves_remov": [1, 3], "output": [1, 3, 8], "output_cr": [1, 3], "outputtimeobj": [1, 8], "outsid": [1, 3], "over": [1, 3], "overwrit": [1, 3], "p": [1, 3, 5], "packag": [0, 3], "pad": [1, 3], "page": [0, 1, 3], "pair": [1, 3], "panda": [1, 3], "param": [0, 1, 3, 4], "param_col": [1, 3], "param_nam": 2, "paramet": [1, 2, 3, 8], "parametersin": [1, 3], "pars": [1, 3], "parse_comp_plot_list": [0, 1, 5], "parse_hv_plot_list": [0, 1, 5], "parse_plot_str": [0, 1, 5], "parse_spec_plot_list": [0, 1, 5], "part": [1, 3], "pass": [1, 2, 3], "password": [1, 8], "past": [1, 3], "path": [1, 3, 8], "pathlib": [1, 3, 8], "paz": [1, 3], "pdf": [1, 3], "peak": [1, 3], "peak_freq_rang": [1, 3], "peak_select": [1, 3], "pend": [1, 3], "per": [1, 3], "percentag": [1, 3], "percentil": [1, 3], "perform": [1, 3], "period_step_octav": [1, 3], "persist": [1, 3], "pick": [1, 3], "pickl": [1, 3], "pipelin": [1, 3], "platform": [1, 8], "plot": [0, 1, 2, 3], "plot_azimuth": [0, 1, 3], 
"plot_azimuth_kwarg": [1, 3], "plot_engin": [1, 3, 5], "plot_hvsr": [0, 1, 3], "plot_input_stream": [1, 3], "plot_outlier_curv": [0, 1, 5], "plot_preview": [0, 1, 5], "plot_report": [1, 3], "plot_result": [0, 1, 5], "plot_str": [1, 5], "plot_stream": [0, 1, 3], "plot_typ": [1, 3], "plot_with_hv": 5, "plotli": [1, 3, 5], "plty": [1, 3], "plu": [1, 3], "png": [1, 3], "point": [1, 3], "polynomi": [1, 3], "possibl": [1, 3], "potenti": [1, 3], "power": [1, 3], "ppsd": [0, 1, 3, 8], "ppsd_kwarg": [1, 3], "ppsd_length": [1, 3], "ppsds_obspi": [0, 1, 3], "precis": [1, 3], "prefer": [1, 3], "prefix": [1, 3], "present": [1, 3], "prevent": 3, "preview_fig": [1, 5], "previou": [1, 3], "primarili": [1, 3], "print": [1, 2, 3], "print_param": [0, 1, 6], "print_report": [1, 3], "privat": [1, 3, 8], "probabilist": [1, 3], "proc": [1, 3], "proc_nam": [1, 8], "process": [1, 2, 3, 8], "process_data": [0, 1, 5], "process_hvsr": [0, 1, 3], "processing_paramet": [1, 3], "processing_window": [1, 3], "profil": [1, 3], "program": 7, "progress": 3, "prohibit": [1, 3], "project": [1, 3], "proper": [1, 3, 8], "properti": [1, 3], "proport": [1, 3], "provid": [1, 3], "psd": [1, 3], "pt": [1, 3], "purpos": [1, 3], "pylot": 3, "pyplot": [1, 3], "pyproj": [1, 3], "python": [1, 3], "quadrat": [1, 3], "qualiti": [1, 3], "r": [1, 3, 8], "rac84": [1, 3, 8], "rad": [1, 3], "radial": [1, 3], "radian": [1, 3], "rais": [1, 3, 8], "raise_error": [1, 8], "rang": [1, 3], "rasp": [1, 3], "raspberri": [1, 3], "rather": [1, 3], "ratio": 1, "raw": [1, 3], "read": [1, 3], "read_csv": [1, 3], "read_data": [0, 1, 5], "read_from_r": [0, 1, 8], "read_inventori": [1, 3], "read_inventory_kwarg": [1, 3], "read_tromino_fil": [0, 1, 3], "readabl": [1, 3], "readcsv_getmeta_fetch_kwarg": [1, 3], "readthedoc": [1, 3], "reboot_app": [0, 1, 7], "recommend": [1, 3], "refer": [1, 3], "regular": [1, 3], "relev": 3, "remov": [1, 3], "remove_method": [1, 3], "remove_nois": [0, 1, 3], "remove_outlier_curv": [0, 1, 3], "remove_raw_nois": [1, 3], "renam": [1, 3], "renov": [1, 3], "repeat": [1, 3], "report": [0, 1, 2, 3], "report_export_format": [1, 3], "report_export_path": [1, 3], "report_format": [1, 2, 3], "repositori": [1, 3], "repres": [1, 3], "reprocess": [1, 3], "requir": [1, 2, 3], "resampl": [1, 3], "resourc": [1, 3], "respect": [1, 3], "rest": [1, 3], "restart": 7, "result": [1, 3], "results_fig": [1, 5], "results_graph_widget": [1, 5], "retain": [1, 3], "return": [1, 3, 7, 8], "return_fig": [1, 3, 5], "rmse": [1, 3], "rmse_thresh": [1, 3, 5], "root": [1, 3], "row": [1, 3], "run": [0, 1, 2, 3, 8], "runtimeerror": [1, 3], "s0": [1, 3], "s1": [1, 3], "same": [1, 3, 8], "sampl": [1, 3], "sample1": [1, 3], "sample_list": [1, 8], "sampling_r": [1, 3], "sat_perc": [1, 3], "satur": [1, 3], "save": [1, 3, 7], "save_dir": [1, 3], "save_progress": [1, 8], "save_suffix": [1, 3], "savgoi": [1, 3], "score": [1, 3], "scp": [1, 3, 8], "script": [2, 7], "search": [0, 1, 3], "second": [1, 3], "see": [1, 2, 3], "seismic": [1, 3], "seismomet": [1, 3], "select": [1, 3], "sensit": [1, 3], "separ": [1, 3], "sequenc": [1, 3], "sesam": [1, 3], "set": [1, 3, 8], "settings_import_path": [1, 3], "settings_import_typ": [1, 3], "setup_session_st": [0, 1, 6], "sever": [1, 3], "shake": [1, 3], "shakem": [1, 8], "shakenam": [1, 8], "shallow": [1, 3], "short": [1, 3], "shorthand": [1, 3], "should": [1, 2, 3], "show": [1, 3], "show_az_plot": [1, 3], "show_azimuth_grid": [1, 3], "show_azimuth_peak": [1, 3], "show_html_report": [1, 3], "show_legend": [1, 3], "show_pdf_report": [1, 3], 
"show_plot": [1, 3, 5], "show_plot_report": [1, 3], "show_print_report": [1, 3], "show_report": 3, "show_report_output": [1, 3], "show_results_plot": [1, 5], "show_stalta_plot": [1, 3], "show_table_report": [1, 3], "show_typ": 6, "shown": [1, 3], "signal": [1, 3], "similar": [1, 3], "simpl": [1, 3], "simpli": [1, 3], "sin": [1, 3], "sinc": [1, 3, 8], "sing": [1, 3], "singl": [1, 3], "site": [1, 3], "site_nam": [1, 3], "sitenam": [1, 3], "size": [1, 3], "skip_on_gap": [1, 3], "sleep_tim": [1, 8], "smooth": [1, 3], "smoother": [1, 3], "sn": [1, 3], "so": [1, 3, 8], "some": [1, 3], "sourc": [1, 2, 3, 4, 5, 6, 7, 8], "south": [1, 3], "space": [1, 3], "spec": [1, 3, 5], "spec_plot_list": 5, "specif": [1, 3], "specifi": [1, 3], "spectral": [1, 3], "spectral_estim": [1, 3], "spectrogram": [1, 3], "spectrogram_compon": [1, 5], "spectrum": [1, 3], "speed": [1, 3], "spline": [1, 3], "sprit_app": [0, 1, 7], "sprit_cli": [0, 1], "sprit_hvsr": [0, 1, 2], "sprit_jupyter_ui": [0, 1], "sprit_plot": [0, 1], "sprit_streamlit_ui": [0, 1], "sprit_tkinter_ui": [0, 1], "sprit_util": [0, 1], "squar": [1, 3], "src": [1, 8], "ss": [1, 3], "sta": [1, 3, 8], "stalta": [1, 3], "stalta_thresh": [1, 3], "standard": [1, 3], "start": [1, 3], "start_byt": [1, 3], "starttim": [1, 3], "state": [1, 8], "statement": [1, 3], "station": [1, 3, 8], "statist": [1, 3], "statu": [1, 3], "step": [1, 3, 8], "still": [1, 3], "str": [1, 3, 8], "stream": [1, 3, 5, 8], "strftime": [1, 3], "string": [1, 3, 8], "struct_format": [1, 3], "style": [1, 3], "subfold": [1, 3], "submodul": 0, "subplot": [1, 3], "subplot_num": 5, "suffix": [1, 3], "summari": [1, 3], "summat": [1, 3], "suppli": [1, 3], "support": [1, 3, 8], "suppress_report_output": [1, 3], "sure": [1, 3, 8], "surfac": [1, 3], "system": [1, 3], "t": [1, 3], "tabl": [1, 3], "table_report": [1, 3], "tabular": [1, 3], "take": [1, 3], "techniqu": 1, "term": [1, 3], "termin": [1, 3, 8], "test": [1, 3], "test12": [1, 3], "test_funct": [0, 1, 3], "text": [1, 3], "text_chang": [0, 1, 6], "than": [1, 3], "thei": [1, 3, 8], "themselv": [1, 3], "thi": [1, 2, 3, 7, 8], "though": [1, 2, 3], "three": [1, 3], "threshold": [1, 3], "through": [1, 3], "throughout": [1, 3], "tigher": [1, 3], "time": [1, 2, 3, 8], "time_int": [1, 3], "time_it": [0, 1, 8], "timezon": [1, 3, 8], "titl": [1, 3], "tkinter": 7, "todai": [1, 8], "too": [1, 3], "tp": [1, 3], "trace": [1, 3], "transform": [1, 3], "tri": [1, 8], "triangular": [1, 3], "trigger": [1, 3], "trim": [1, 3], "tromino": [1, 3], "true": [1, 3, 5, 6, 8], "try": [1, 3], "tupl": [1, 3], "two": [1, 3], "txt": [1, 3], "type": [1, 2, 3], "tz": [1, 3], "tzone": [1, 3, 8], "ui": 4, "underli": [1, 3], "uniqu": [1, 3], "unit": [1, 3, 8], "updat": [1, 3], "update_metadata": [1, 3], "upper": [1, 3], "us": [1, 2, 3, 8], "use_hv_curv": [1, 3, 5], "use_percentil": [1, 3, 5], "use_subplot": [1, 3], "user": [1, 3, 7], "usernam": [1, 8], "usual": [1, 3], "utc": [1, 3, 8], "utcdatetim": [1, 3], "v": [1, 3], "valei": [1, 3], "valid": [1, 3, 8], "valu": [1, 3], "var": [1, 8], "var_typ": [1, 8], "variabl": [1, 3], "varieti": [1, 3], "variou": [1, 3], "vector": [1, 3], "verbos": [1, 3, 5, 6, 8], "veri": [1, 3], "vertic": [1, 3], "via": [1, 3], "visual": [1, 3], "vizual": [1, 3], "wa": [1, 3], "warm_cool": [1, 3], "warmup": [1, 3], "warmup_tim": [1, 3], "wb": [1, 3], "well": [1, 2, 3], "were": 3, "west": [1, 3], "what": [1, 3], "when": [1, 3], "where": [1, 3], "wherea": [1, 3], "whether": [1, 3], "which": [1, 3], "while": [1, 3], "whose": [1, 3], "wide": [1, 3], "widget": [1, 
3, 4], "width": [1, 3], "wiki": [1, 3], "wikipedia": [1, 3], "window": [1, 3, 8], "within": [1, 2, 3], "without": [1, 3], "word": [1, 3], "work": [1, 2, 3], "workflow": [1, 3], "worksheet": [1, 3], "would": [1, 3], "wrapper": [1, 3], "write": [1, 3], "write_path": [1, 3], "write_to_info_tab": [0, 1, 6], "written": 3, "www": [1, 3], "x": [1, 3], "x_mark": [0, 1, 8], "x_valu": [1, 8], "xcoord": [1, 3], "y": [1, 3], "ycoord": [1, 3], "year": [1, 3, 8], "ylim_std": 3, "you": [1, 2, 3], "your": [1, 2, 3], "yyyi": [1, 3], "z": [1, 3, 5], "zero": [1, 3]}, "titles": ["Welcome to the documentation for SpRIT!", "sprit package", "sprit.sprit_cli module", "sprit.sprit_hvsr module", "sprit.sprit_jupyter_UI module", "sprit.sprit_plot module", "sprit.sprit_streamlit_ui module", "sprit.sprit_tkinter_ui module", "sprit.sprit_utils module"], "titleterms": {"content": 0, "document": 0, "indic": 0, "modul": [2, 3, 4, 5, 6, 7, 8], "packag": 1, "sprit": [0, 1, 2, 3, 4, 5, 6, 7, 8], "sprit_cli": 2, "sprit_hvsr": 3, "sprit_jupyter_ui": 4, "sprit_plot": 5, "sprit_streamlit_ui": 6, "sprit_tkinter_ui": 7, "sprit_util": 8, "submodul": 1, "tabl": 0, "welcom": 0}}) \ No newline at end of file diff --git a/docs/_build/html/sprit.html b/docs/_build/html/sprit.html deleted file mode 100644 index 7f9d032d..00000000 --- a/docs/_build/html/sprit.html +++ /dev/null @@ -1,1986 +0,0 @@ - - - - - - - sprit package — sprit 1.4 documentation - - - - - - - - - - - - - - - - - -

sprit package

-

This module analyzes ambient seismic data using the Horizontal to Vertical Spectral Ratio (HVSR) technique.

-
-
-class sprit.HVSRBatch(*args, **kwargs)[source]
-

Bases: object

-

HVSRBatch is the data container used for batch processing. It contains several HVSRData objects (one for each site). These can be accessed by their site name, using either square brackets (HVSRBatchVariable["SiteName"]) or the dot accessor (HVSRBatchVariable.SiteName).

-

The dot accessor may not work if there is a space in the site name.

-

All of the functions in the sprit package are designed to perform the bulk of their operations iteratively on the individual HVSRData objects contained in the HVSRBatch object, and do little with the HVSRBatch object itself, beyond using it to determine which sites it contains.
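The access patterns above can be sketched as follows (a minimal example; the site name is hypothetical and `batch` is assumed to be an existing HVSRBatch instance):

    # Dictionary-style access by site name
    site_data = batch["SiteName"]
    # Dot-accessor access; may not work if the site name contains a space
    site_data = batch.SiteName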

-

Methods


copy([type])

Make a copy of the HVSRBatch object.

export([hvsr_export_path, ext])

Method to export the HVSRData objects in the HVSRBatch container to individual .hvsr pickle files.

export_settings([site_name, ...])

Method to export settings from an HVSRData object in the HVSRBatch object.

get_report(**kwargs)

Method to get a report from processed data, in print, graphical, or tabular format.

items()

Method to return both the site names and the HVSRData object as a set of dict_items tuples.

keys()

Method to return the "keys" of the HVSRBatch object.

plot(**kwargs)

Method to plot data, based on the sprit.plot_hvsr() function.

report(**kwargs)

Wrapper of get_report()

-
-
copy(type='shallow')[source]

Make a copy of the HVSRBatch object. Uses the python copy module.

Parameters:

type : str {'shallow', 'deep'}
    Based on input, creates either a shallow or deep copy of the HVSRBatch object. Shallow is equivalent of copy.copy(). Input of 'deep' is equivalent of copy.deepcopy() (still experimental). Defaults to shallow.
export(hvsr_export_path=True, ext='hvsr')[source]

Method to export HVSRData objects in HVSRBatch container to individual .hvsr pickle files.

Parameters:

hvsr_export_path : filepath, default=True
    Filepath to save the file. Can be either a directory (in which case a filename is assigned based on the HVSRData attributes) or a full filepath. If True, it will first try to save each file to the same directory as input_data, then, if that does not work, to the current working directory, then to the user's home directory. By default True.

ext : str, optional
    The extension to use for the output, by default 'hvsr'. This is still a pickle file that can be read with pickle.load(), but will have a .hvsr extension.
export_settings(site_name=None, export_settings_path='default', export_settings_type='all', include_location=False, verbose=True)[source]

Method to export settings from an HVSRData object in the HVSRBatch object.

Simply calls sprit.export_settings() from the specified HVSRData object in the HVSRBatch object. See sprit.export_settings() for more details.

Parameters:

site_name : str, default=None
    The name of the site whose settings should be exported. If None, will default to the first site, by default None.

export_settings_path : str, optional
    Filepath to output file. If left as 'default', will save as the default value in the resources directory. If that is not possible, will save to the home directory, by default 'default'.

export_settings_type : str, {'all', 'instrument', 'processing'}, optional
    The type of settings to save, by default 'all'.

include_location : bool, optional
    Whether to include the location information in the instrument settings, if that settings type is selected, by default False.

verbose : bool, optional
    Whether to print output (filepath and settings) to terminal, by default True.

See also: export_settings
get_report(**kwargs)[source]

Method to get report from processed data, in print, graphical, or tabular format.

Returns:

Variable
    May return nothing, pandas.DataFrame, or pyplot Figure, depending on input.

See also: get_report
items()[source]

Method to return both the site names and the HVSRData object as a set of dict_items tuples. Functions similar to dict.items().

Returns:

_type_
    _description_
keys()[source]

Method to return the "keys" of the HVSRBatch object. For HVSRBatch objects, these are the site names. Functions similar to dict.keys().

Returns:

dict_keys
    A dict_keys object listing the site names of each of the HVSRData objects contained in the HVSRBatch object.
plot(**kwargs)[source]

Method to plot data, based on the sprit.plot_hvsr() function.

All the same kwargs and default values apply as plot_hvsr(). For return_fig, returns it to the 'Plot_Report' attribute of each HVSRData object.

Returns:

_type_
    _description_

See also: plot_hvsr
report(**kwargs)[source]

Wrapper of get_report().

See also: get_report
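As a quick illustration of the access patterns described above, here is a minimal sketch; it relies only on behavior documented in this module (the built-in sample batch data described under sprit.run() below), and variable names are arbitrary:

    import sprit

    # Build an HVSRBatch by processing the built-in sample batch data
    batch_data = sprit.run(input_data='sample', source='batch')

    # keys() returns the site names; items() pairs each name with its HVSRData
    for site_name, site_data in batch_data.items():
        print(site_name, type(site_data))

    # Square-bracket and dot access are equivalent (dot may fail if the
    # site name contains a space)
    first_site = list(batch_data.keys())[0]
    site_hvsr = batch_data[first_site]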
class sprit.HVSRData(*args, **kwargs)[source]

Bases: object

HVSRData is the basic data class of the sprit package. It contains all the processed data, input parameters, and reports.

These attributes and objects can be accessed using square brackets or the dot accessor. For example, to access the site name, HVSRData['site'] and HVSRData.site will both return the site name.

Some of the methods that work on the HVSRData object (e.g., .plot() and .get_report()) are essentially wrappers for some of the main sprit package functions (sprit.plot_hvsr() and sprit.get_report(), respectively).

Attributes:

batch
    Whether this HVSRData object is part of an HVSRBatch object.

datastream
    A copy of the original obspy datastream read in.

params
    Dictionary containing the parameters used to process the data.

ppsds
    Dictionary copy of the class object obspy.signal.spectral_estimation.PPSD().

ppsds_obspy
    The original PPSD information from obspy.signal.spectral_estimation.PPSD(), so as to keep the original if the copy is manipulated/changed.

Methods
copy([type])
    Make a copy of the HVSRData object.
export([hvsr_export_path, ext])
    Method to export HVSRData objects to .hvsr pickle files.
export_settings([export_settings_path, ...])
    Method to export settings from HVSRData object.
get_report(**kwargs)
    Method to get report from processed data, in print, graphical, or tabular format.
items()
    Method to return the "items" of the HVSRData object.
keys()
    Method to return the "keys" of the HVSRData object.
plot(**kwargs)
    Method to plot data, wrapper of sprit.plot_hvsr().
report(**kwargs)
    Wrapper of get_report().
property batch

Whether this HVSRData object is part of an HVSRBatch object. This is used throughout the code to help direct the object into the proper processing pipeline.

Returns:

bool
    True if the HVSRData object is part of an HVSRBatch object, otherwise False.
copy(type='shallow')[source]

Make a copy of the HVSRData object. Uses the python copy module.

Parameters:

type : str {'shallow', 'deep'}
    Based on input, creates either a shallow or deep copy of the HVSRData object. Shallow is equivalent of copy.copy(). Input of type='deep' is equivalent of copy.deepcopy() (still experimental). Defaults to shallow.
property datastream

A copy of the original obspy datastream read in. This helps to retain the original data even after processing is carried out.

Returns:

obspy.core.Stream.stream
    Obspy stream
export(hvsr_export_path=None, ext='hvsr')[source]

Method to export HVSRData objects to .hvsr pickle files.

Parameters:

hvsr_export_path : filepath, default=None
    Filepath to save the file. Can be either a directory (in which case a filename is assigned based on the HVSRData attributes) or a full filepath. If no valid filepath is given, it will first try to save the file to the same directory as input_data, then, if that does not work, to the current working directory, then to the user's home directory.

ext : str, optional
    The extension to use for the output, by default 'hvsr'. This is still a pickle file that can be read with pickle.load(), but will have a .hvsr extension.
export_settings(export_settings_path='default', export_settings_type='all', include_location=False, verbose=True)[source]

Method to export settings from HVSRData object. Simply calls sprit.export_settings() from the HVSRData object. See sprit.export_settings() for more details.

Parameters:

export_settings_path : str, optional
    Filepath to output file. If left as 'default', will save as the default value in the resources directory. If that is not possible, will save to the home directory, by default 'default'.

export_settings_type : str, {'all', 'instrument', 'processing'}, optional
    The type of settings to save, by default 'all'.

include_location : bool, optional
    Whether to include the location information in the instrument settings, if that settings type is selected, by default False.

verbose : bool, optional
    Whether to print output (filepath and settings) to terminal, by default True.
get_report(**kwargs)[source]

Method to get report from processed data, in print, graphical, or tabular format.

Returns:

Variable
    May return nothing, pandas.DataFrame, or pyplot Figure, depending on input.

See also: get_report
items()[source]

Method to return the "items" of the HVSRData object. For HVSRData objects, this is a dict_items object with the keys and values in tuples. Functions similar to dict.items().

Returns:

dict_items
    A dict_items object of the HVSRData object's attributes, parameters, etc.
keys()[source]

Method to return the "keys" of the HVSRData object. For HVSRData objects, these are the attributes and parameters of the object. Functions similar to dict.keys().

Returns:

dict_keys
    A dict_keys object of the HVSRData object's attributes, parameters, etc.
property params

Dictionary containing the parameters used to process the data.

Returns:

dict
    Dictionary containing the process parameters.
plot(**kwargs)[source]

Method to plot data, wrapper of sprit.plot_hvsr().

Returns:

matplotlib.Figure, matplotlib.Axis (if return_fig=True)

See also: plot_hvsr, plot_azimuth
property ppsds

Dictionary copy of the class object obspy.signal.spectral_estimation.PPSD(). The dictionary copy allows manipulation of the data in PPSD, whereas that data cannot be easily manipulated in the original Obspy object.

Returns:

dict
    Dictionary copy of the PPSD information from generate_ppsds().
property ppsds_obspy

The original PPSD information from obspy.signal.spectral_estimation.PPSD(), so as to keep the original if the copy is manipulated/changed.
report(**kwargs)[source]

Wrapper of get_report().

See also: get_report
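To make the bracket/dot equivalence and the wrapper methods concrete, here is a minimal sketch using the built-in sample data described under sprit.run() below (variable names are arbitrary):

    import sprit

    hv_data = sprit.run(input_data='sample1')   # process one sample site

    # Square brackets and the dot accessor return the same attribute
    print(hv_data['site'], hv_data.site)

    # Wrapper methods call the main sprit functions on this object
    hv_data.plot()                 # wraps sprit.plot_hvsr()
    report = hv_data.get_report()  # wraps sprit.get_report()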
sprit.assert_check(var, cond=None, var_type=None, error_message='Output not valid', verbose=False)[source]
sprit.batch_data_read(batch_data, batch_type='table', param_col=None, batch_params=None, verbose=False, **readcsv_getMeta_fetch_kwargs)[source]

Function to read data in as a batch of multiple data files. This is best used through sprit.fetch_data(*args, source='batch', **other_kwargs).

Parameters:

batch_data : filepath or list
    Input data information for how to read in data as a batch. Can be a filepath or a list of filepaths/stream objects. If a filepath, it should point to a .csv file (or similar that can be read by pandas.read_csv()) with batch data information.

batch_type : str, optional
    Type of batch read, only 'table' and 'filelist' accepted. If 'table', will read data from a file read in using pandas.read_csv(), by default 'table'.

param_col : None or str, optional
    Name of the parameter column from the batch information file. Only used if batch_type='table' and a single parameter column is used, rather than one column per parameter (for a single parameter column, parameters are formatted with = between keys/values and , between item pairs), by default None.

batch_params : list, dict, or None, default=None
    Parameters to be used if batch_type='filelist'. If a list, it needs to be the same length as batch_data. If a dict, it will be applied to all files in batch_data and will be combined with extra keyword arguments caught by **readcsv_getMeta_fetch_kwargs.

verbose : bool, optional
    Whether to print information to terminal during batch read, by default False.

**readcsv_getMeta_fetch_kwargs
    Keyword arguments that will be read into pandas.read_csv(), sprit.input_params(), sprit.get_metadata(), and/or sprit.fetch_data().

Returns:

hvsrBatch
    HVSRBatch object with each item representing a different HVSRData object.

Raises:

IndexError
    _description_
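As the note above suggests, batch reads are usually driven through fetch_data(). A minimal sketch, assuming a hypothetical batch CSV with one row per data file and one column per parameter:

    import sprit

    # 'batch_info.csv' is a hypothetical table describing each file
    params = sprit.input_params(input_data='batch_info.csv')
    batch = sprit.fetch_data(params, source='batch')  # dispatches to batch_data_read()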
sprit.calculate_azimuth(hvsr_data, azimuth_angle=30, azimuth_type='multiple', azimuth_unit='degrees', show_az_plot=False, verbose=False, **plot_azimuth_kwargs)[source]

Function to calculate azimuthal horizontal component at specified angle(s). Adds each new horizontal component as a radial component to the obspy.Stream object at hvsr_data['stream'].

Parameters:

hvsr_data : HVSRData
    Input HVSR data.

azimuth_angle : int, default=30
    If azimuth_type='multiple', this is the angular step (in unit azimuth_unit) of each of the azimuthal measurements. If azimuth_type='single', this is the angle (in unit azimuth_unit) of the single calculated azimuthal measurement. By default 30.

azimuth_type : str, default='multiple'
    What type of azimuthal measurement to make, by default 'multiple'. If 'multiple' (or {'multi', 'mult', 'm'}), will take a measurement at each angular step of azimuth_angle of unit azimuth_unit. If 'single' (or {'sing', 's'}), will take a single azimuthal measurement at the angle specified in azimuth_angle.

azimuth_unit : str, default='degrees'
    Angular unit used to specify the azimuth_angle parameter. By default 'degrees'. If 'degrees' (or {'deg', 'd'}), will use degrees. If 'radians' (or {'rad', 'r'}), will use radians.

show_az_plot : bool, default=False
    Whether to show the azimuthal plot, by default False.

verbose : bool, default=False
    Whether to print terminal output, by default False.

Returns:

HVSRData
    Updated HVSRData object specified in hvsr_data, with the hvsr_data['stream'] attribute containing additional components (EHR-*), with * being the zero-padded (3 digit) azimuth angle in degrees.
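In practice this is most easily reached through sprit.run(), which parses azimuth keywords out of its **kwargs (see run() below). A minimal sketch using the built-in sample data:

    import sprit

    # run() dispatches these keywords to calculate_azimuth()
    hv_data = sprit.run(input_data='sample1',
                        azimuth_calculation=True,
                        azimuth_type='single',
                        azimuth_angle=45,
                        azimuth_unit='degrees')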
sprit.catch_errors(func)[source]
sprit.check_gui_requirements()[source]
sprit.check_mark(incolor=False, interminal=False)[source]

The default Windows terminal is not able to display the check mark character correctly. This function returns another displayable character if the platform is Windows.
sprit.check_peaks(hvsr_data, hvsr_band=[0.4, 40], peak_selection='max', peak_freq_range=[0.4, 40], azimuth='HV', verbose=False)[source]

Function to run tests on HVSR peaks to find the best one and see if it passes quality checks.

Parameters:

hvsr_data : dict
    Dictionary containing all the calculated information about the HVSR data (i.e., hvsr_out returned from process_hvsr).

hvsr_band : tuple or list, default=[0.4, 40]
    Two-item tuple or list with the lower and upper limit of frequencies to analyze.

peak_selection : str or numeric, default='max'
    How to select the "best" peak used in the analysis. For peak_selection="max" (default value), the highest peak within peak_freq_range is used. For peak_selection='scored', an algorithm is used to select the peak based in part on which peak passes the most SESAME criteria. If a numeric value is used (e.g., int or float), this should be a frequency value to manually select as the peak of interest.

peak_freq_range : tuple or list, default=[0.4, 40]
    The frequency range within which to check for peaks. If there is an HVSR curve with multiple peaks, this allows the full range of data to be processed while limiting peak picks to the likely range.

verbose : bool, default=False
    Whether to print results and inputs to terminal.

Returns:

hvsr_data : HVSRData or HVSRBatch object
    Object containing the previous input data, plus information about the peak tests.
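For example, a minimal sketch of re-running the peak tests with a manually selected peak frequency (the 2.5 Hz value is an arbitrary illustration), using sample data processed by sprit.run():

    import sprit

    hv_data = sprit.run(input_data='sample1')
    # A numeric peak_selection manually picks the peak at that frequency
    hv_data = sprit.check_peaks(hv_data, peak_selection=2.5,
                                peak_freq_range=[1, 10], verbose=True)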
sprit.check_tsteps(hvsr_data)[source]

Check time steps of PPSDs to make sure they are all the same length.
sprit.check_xvalues(ppsds)[source]

Check x_values of PPSDs to make sure they are all the same length.
sprit.checkifpath(filepath, sample_list='', verbose=False, raise_error=False)[source]

Support function to check if a filepath is a pathlib.Path object, and tries to convert it if not.

Parameters:

filepath : str or pathlib.Path, or anything
    Filepath to check. If not a valid filepath, will not convert and raises an error.

Returns:

filepath : pathlib.Path
    pathlib.Path of filepath.
sprit.create_jupyter_ui()[source]
sprit.export_data(hvsr_data, hvsr_export_path=None, ext='hvsr', verbose=False)[source]

Export data into pickle format that can be read back in using import_data(), so data does not need to be processed each time. The default extension is .hvsr, but it is still a pickled file that can be read in using pickle.load().

Parameters:

hvsr_data : HVSRData or HVSRBatch
    Data to be exported.

hvsr_export_path : str or filepath object, default=None
    String or filepath object to be read by pathlib.Path() and/or a with open(hvsr_export_path, 'wb') statement. If None, defaults to the input input_data directory, by default None.

ext : str, default='hvsr'
    Filepath extension to use for the data file, by default 'hvsr'.
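A minimal round-trip sketch (the output filename is a placeholder), pairing this with import_data() documented below:

    import sprit

    hv_data = sprit.run(input_data='sample1')
    sprit.export_data(hv_data, hvsr_export_path='site1.hvsr')  # pickled output
    hv_again = sprit.import_data('site1.hvsr')                 # read back in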
sprit.export_settings(hvsr_data, export_settings_path='default', export_settings_type='all', include_location=False, verbose=True)[source]

Save settings to a json file.

Parameters:

export_settings_path : str, default="default"
    Where to save the json file(s) containing the settings, by default 'default'. If "default", will save to the sprit package resources. Otherwise, set a filepath location where you would like it to be saved. If 'all' is selected, a directory should be supplied. Otherwise, it will save in the directory of the provided file, if it exists. Otherwise, defaults to the home directory.

export_settings_type : str, {'all', 'instrument', 'processing'}
    What kind of settings to save. If 'all', saves all possible types in their respective json files. If 'instrument', saves the instrument settings to their respective file. If 'processing', saves the processing settings to their respective file. By default 'all'.

include_location : bool, default=False, input CRS
    Whether to include the location parameters in the exported settings document. This includes xcoord, ycoord, elevation, elev_unit, and input_crs.

verbose : bool, default=True
    Whether to print outputs and information to the terminal.
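A minimal sketch pairing this with import_settings() (documented below); the settings directory and the exact output filename are placeholders, since file naming is handled by the function:

    import sprit

    hv_data = sprit.run(input_data='sample1')
    # Save only the processing settings to a chosen directory (placeholder)
    sprit.export_settings(hv_data, export_settings_path='/path/to/settings/',
                          export_settings_type='processing')
    # Later, read settings back in (filename here is hypothetical)
    settings = sprit.import_settings('/path/to/settings/processing_settings.json',
                                     settings_import_type='processing')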
sprit.fetch_data(params, source='file', data_export_path=None, data_export_format='mseed', detrend='spline', detrend_order=2, update_metadata=True, plot_input_stream=False, plot_engine='matplotlib', show_plot=True, verbose=False, **kwargs)[source]

Fetch ambient seismic data from a source to read into an obspy stream.

Parameters:

params : dict
    Dictionary containing all the necessary params to get data. Parameters defined using the input_params() function.

source : str, {'raw', 'dir', 'file', 'batch'}
    String indicating where/how the data file was created. For example, if raw data, it will need to find the correct channels. 'raw' finds Raspberry Shake data, from raw output copied using scp directly from the Raspberry Shake, either in a folder or subfolders; 'dir' is used if the day's 3 component files (currently Raspberry Shake supported only) are all contained in a directory by themselves; 'file' is used if the params['input_data'] specified in input_params() is the direct filepath to a single file to be read directly into an obspy stream; 'batch' is used to read a list or specified set of seismic files. Most commonly, a csv file can be read in with all the parameters. Each row in the csv is a separate file. Columns can be arranged by parameter.

data_export_path : None or str or pathlib obj, default=None
    If None (or False), data is not trimmed in this function. Otherwise, this is the directory to save trimmed and exported data.

data_export_format : str, default='mseed'
    If data_export_path is not None, this is the format in which to save the data.

detrend : str or bool, default='spline'
    If False, data is not detrended. Otherwise, this should be a string accepted by the type parameter of the obspy.core.trace.Trace.detrend method: https://docs.obspy.org/packages/autogen/obspy.core.trace.Trace.detrend.html

detrend_order : int, default=2
    If the detrend parameter is 'spline' or 'polynomial', this is passed directly to the order parameter of the obspy.core.trace.Trace.detrend method.

update_metadata : bool, default=True
    Whether to update the metadata file, used primarily with Raspberry Shake data, which uses a generic inventory file.

plot_input_stream : bool, default=False
    Whether to plot the raw input stream. This plot includes a spectrogram (Z component) and the raw (with decimation for speed) plots of each component signal.

plot_engine : str, default='matplotlib'
    Which plotting library/engine to use for plotting the input stream. Options are 'matplotlib', 'plotly', or 'obspy' (not case sensitive).

verbose : bool, default=False
    Whether to print outputs and inputs to the terminal.

**kwargs
    Keyword arguments, primarily for 'batch' and 'dir' sources.

Returns:

params : HVSRData or HVSRBatch object
    Same as the params parameter, but with an additional "stream" attribute with an obspy data stream with 3 traces: Z (vertical), N (north-south), and E (east-west).
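A minimal sketch of the input_params() to fetch_data() step (the mseed path is a placeholder):

    import sprit

    params = sprit.input_params(input_data='/path/to/data.mseed',  # placeholder
                                site='Example Site')
    hv_data = sprit.fetch_data(params, source='file',
                               detrend='spline', detrend_order=2)
    print(hv_data['stream'])  # obspy stream with Z, N, and E traces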
sprit.format_time(inputDT, tzone='UTC')[source]

Private function to format time, used in other functions.

Formats input time to datetime objects in UTC.

Parameters:

inputDT : str or datetime obj
    Input datetime. Can include date and time, just date (time inferred to be 00:00:00.00), or just time (if so, the date is set as today).

tzone : str or int, default='UTC' {'utc', 'local'}
    Timezone of data entry. If a string and not UTC, assumed to be the timezone of the computer running the process. If an int, assumed to be the offset from UTC (e.g., CST in the United States is -6; CDT in the United States is -5).

Returns:

outputTimeObj : datetime object in UTC
    Output datetime.datetime object, now in UTC time.
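A minimal sketch of the two documented input styles:

    import sprit

    # Date and time, with an integer UTC offset (CST in the US is -6)
    utc_dt = sprit.format_time('2024-10-30 13:05:00', tzone=-6)

    # Time only: the date is inferred as today, per the docstring above
    utc_t = sprit.format_time('13:05', tzone='UTC')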
sprit.generate_ppsds(hvsr_data, azimuthal_ppsds=False, verbose=False, **ppsd_kwargs)[source]

Generates PPSDs for each channel.

Channels need to be in Z, N, E order. Info on PPSD creation here: https://docs.obspy.org/packages/autogen/obspy.signal.spectral_estimation.PPSD.html

Parameters:

hvsr_data : dict, HVSRData object, or HVSRBatch object
    Data object containing all the parameters and other data of interest (stream and paz, for example).

azimuthal_ppsds : bool, default=False
    Whether to generate PPSDs for azimuthal data.

verbose : bool, default=True
    Whether to print inputs and results to terminal.

**ppsd_kwargs : dict
    Dictionary with keyword arguments that are passed directly to obspy.signal.PPSD. If the following keywords are not specified, their defaults are amended in this function from the obspy defaults for its PPSD function. Specifically:

    - ppsd_length defaults to 30 (seconds) here instead of 3600
    - skip_on_gaps defaults to True instead of False
    - period_step_octaves defaults to 0.03125 instead of 0.125

Returns:

ppsds : HVSRData object
    Dictionary containing entries with PPSDs for each channel.
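A minimal sketch overriding one of the amended defaults listed above (the mseed path is a placeholder):

    import sprit

    params = sprit.input_params(input_data='/path/to/data.mseed')  # placeholder
    hv_data = sprit.fetch_data(params)
    # ppsd_kwargs pass straight through to obspy's PPSD
    hv_data = sprit.generate_ppsds(hv_data, ppsd_length=60, skip_on_gaps=True)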
sprit.get_char(in_char)[source]

Outputs character with proper encoding/decoding.
sprit.get_metadata(params, write_path='', update_metadata=True, source=None, **read_inventory_kwargs)[source]

Get metadata and calculate or get the paz parameter needed for PPSD.

Parameters:

params : dict
    Dictionary containing all the input and other parameters needed for processing. Output from the input_params() function.

write_path : str
    String with the output filepath of where to write the updated inventory or metadata file. If not specified, does not write a file.

update_metadata : bool
    Whether to update the metadata file itself, or just read it as-is. If using the provided Raspberry Shake metadata file, select True.

source : str, default=None
    This passes the source variable value to _read_RS_metadata. It is expected that this is passed directly from the source parameter of sprit.fetch_data().

Returns:

params : dict
    Modified input dictionary with an additional key:value pair containing the paz dictionary (key = "paz").
sprit.get_report(hvsr_results, report_formats=['print', 'table', 'plot', 'html', 'pdf'], azimuth='HV', plot_type='HVSR p ann C+ p ann Spec p ann', plot_engine='matplotlib', show_print_report=True, show_table_report=False, show_plot_report=True, show_html_report=False, show_pdf_report=True, suppress_report_outputs=False, show_report_outputs=False, csv_handling='append', report_export_format=None, report_export_path=None, verbose=False, **kwargs)[source]

Generate and/or print and/or export a report of the HVSR analysis in a variety of formats.

Formats include:

- 'print': A (monospace) text summary of the HVSR results.
- 'table': A pandas.DataFrame summary of the HVSR results. This is useful for copy/pasting directly into a larger worksheet.
- 'plot': A plot summary of the HVSR results, generated using the plot_hvsr() function.
- 'html': An HTML document/text of the HVSR results. This includes the table, print, and plot reports in one document.
- 'pdf': A PDF document showing the summary of the HVSR results. The PDF report is simply the HTML report saved to an A4-sized PDF document.

Parameters:

hvsr_results : dict
    Dictionary containing all the information about the processed hvsr data.

report_formats : {'table', 'print', 'plot'}
    Format in which to print or export the report. The following report_formats return the following items in the following attributes:

    - 'print': hvsr_results['Print_Report'] as a str
    - 'plot': hvsr_results['HV_Plot'] - matplotlib.Figure object
    - 'table': hvsr_results['Table_Report'] - pandas.DataFrame object
    - 'html': hvsr_results['HTML_Report'] - a string containing the text for an HTML document
    - 'pdf': currently does not save to the HVSRData object itself, can only be saved to the disk directly
    - list/tuple - a list or tuple of the above objects, in the same order they are in the report_formats list

plot_type : str, default='HVSR p ann C+ p ann Spec p ann'
    What type of plot to plot, if 'plot' is part of the report_formats input.

azimuth : str, default='HV'
    Which azimuth to plot, by default "HV", which is the main "azimuth" combining the E and N components.

csv_handling : str, {'append', 'overwrite', 'keep/rename'}
    How to handle table report outputs if the designated csv output file already exists. By default, appends the new information to the end of the existing file.

suppress_report_outputs : bool, default=False
    If True, only reads output to the appropriate attribute of the data class (i.e., print does not print, only reads text into a variable). If False, performs as normal.

report_export_format : list or str, default=['pdf']
    A string or list of strings indicating which report formats should be exported to disk.

report_export_path : None, bool, or filepath, default=None
    If None or False, does not export; if True, will export to the same directory as the input_data parameter in the input_params() function. Otherwise, it should be a string or path object indicating where to export results. May be a file or directory. If a directory is specified, the filename will be "<site_name>_<acq_date>_<UTC start time>-<UTC end time>". The extension/suffix defaults to png for report_formats="plot", csv for 'table', txt for 'print', html for 'html', and pdf for 'pdf'.

verbose : bool, default=True
    Whether to print the results to terminal. This is the same output as report_formats='print', and will not repeat if that is already selected.

Returns:

sprit.HVSRData
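A minimal sketch generating print, table, and HTML reports for the sample data and exporting the HTML version (the export directory is a placeholder):

    import sprit

    hv_results = sprit.run(input_data='sample1')
    hv_results = sprit.get_report(hv_results,
                                  report_formats=['print', 'table', 'html'],
                                  report_export_format='html',
                                  report_export_path='/path/to/reports/')  # placeholder
    table = hv_results['Table_Report']  # pandas.DataFrame, per the list above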
sprit.gui(kind='browser')[source]

Function to open a graphical user interface (gui).

Parameters:

kind : str, optional
    What type of gui to open: "default" opens the regular windowed interface, "widget" opens the jupyter widget, and "lite" opens the lite version (pending update). By default 'default'.
sprit.has_required_channels(stream)[source]
sprit.import_data(import_filepath, data_format='pickle')[source]

Function to import .hvsr (or other extension) data exported using the export_data() function.

Parameters:

import_filepath : str or path object
    Filepath of a file created using the export_data() function. This is usually a pickle file with a .hvsr extension.

data_format : str, default='pickle'
    Type of format the data is in. Currently, only 'pickle' is supported. Eventually, json or other types may be supported, by default 'pickle'.

Returns:

HVSRData or HVSRBatch object
sprit.import_settings(settings_import_path, settings_import_type='instrument', verbose=False)[source]
sprit.input_params(input_data, site='HVSR Site', id_prefix=None, network='AM', station='RAC84', loc='00', channels=['EHZ', 'EHN', 'EHE'], acq_date='2024-10-30', starttime=UTCDateTime(2024, 10, 30, 0, 0), endtime=UTCDateTime(2024, 10, 30, 23, 59, 59, 999999), tzone='UTC', xcoord=-88.2290526, ycoord=40.1012122, elevation=755, input_crs=None, output_crs=None, elev_unit='meters', depth=0, instrument='Raspberry Shake', metapath=None, hvsr_band=[0.4, 40], peak_freq_range=[0.4, 40], processing_parameters={}, verbose=False)[source]

Function for designating input parameters for reading in and processing data.

Parameters:

input_data : str or pathlib.Path object
    Filepath of data. This can be a directory or file, but will need to match what is chosen later as the source parameter in fetch_data().

site : str, default="HVSR Site"
    Site name as designated by the user for ease of reference. Used for plotting titles, filenames, etc.

id_prefix : str, default=None
    A prefix that may be used to create unique identifiers for each site. The identifier created is saved as the ['HVSR_ID'] attribute of the HVSRData object, and is equivalent to the following formatted string: f"{id_prefix}-{acq_date.strftime('%Y%m%d')}-{starttime.strftime('%H%M')}-{station}".

network : str, default='AM'
    The network designation of the seismometer. This is necessary for data from Raspberry Shakes. 'AM' is for the Amateur network, which fits Raspberry Shakes.

station : str, default='RAC84'
    The station name of the seismometer. This is necessary for data from Raspberry Shakes.

loc : str, default='00'
    Location information of the seismometer.

channels : list, default=['EHZ', 'EHN', 'EHE']
    The three channels used in this analysis, as a list of strings. It is preferred that the Z component be first, but not necessary.

acq_date : str, int, date object, or datetime object
    If a string, the preferred format is 'YYYY-MM-DD'. If an int, this will be interpreted as the time_int of year of the current year (e.g., 33 would be Feb 2 of the current year). If a date or datetime object, this will be the date. Make sure to account for time change when converting to UTC (if UTC is the following time_int, use the UTC time_int).

starttime : str, time object, or datetime object, default='00:00:00.00'
    Start time of the data stream. This is necessary for Raspberry Shake data in 'raw' form, or for trimming data. Format can be either 'HH:MM:SS.micros' or 'HH:MM' at minimum.

endtime : str, time object, or datetime object, default='23:59:59.99'
    End time of the data stream. This is necessary for Raspberry Shake data in 'raw' form, or for trimming data. Same format as starttime.

tzone : str or int, default='UTC'
    Timezone of input data. If a string, 'UTC' will use the time as input directly. Any other string value needs to be a TZ identifier in the IANA database; a wikipedia page of these is available here: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. If an int, it should be the int value of the UTC offset (e.g., for American Eastern Standard Time: -5). This is necessary for Raspberry Shake data in 'raw' format.

xcoord : float, default=-88.2290526
    Longitude (or easting, or, generally, X coordinate) of the data point, in the Coordinate Reference System (CRS) designated by input_crs. Currently only used in the table output, but will likely be used in the future for mapping/profile purposes.

ycoord : float, default=40.1012122
    Latitude (or northing, or, generally, Y coordinate) of the data point, in the Coordinate Reference System (CRS) designated by input_crs. Currently only used in the table output, but will likely be used in the future for mapping/profile purposes.

input_crs : str or other format read by pyproj, default='EPSG:4326'
    Coordinate reference system of input data, as used by pyproj.CRS.from_user_input().

output_crs : str or other format read by pyproj, default='EPSG:4326'
    Coordinate reference system to which input data will be transformed, as used by pyproj.CRS.from_user_input().

elevation : float, default=755
    Surface elevation of the data point. Not currently used (except in table output), but will likely be used in the future.

depth : float, default=0
    Depth of the seismometer. Not currently used, but will likely be used in the future.

instrument : str {'Raspberry Shake', "Tromino"}
    Instrument from which the data was acquired.

metapath : str or pathlib.Path object, default=None
    Filepath of metadata, in a format supported by obspy.read_inventory. If the default value of None, will read from the resources folder of the repository (only supported for Raspberry Shake).

hvsr_band : list, default=[0.4, 40]
    Two-element list containing the low and high "corner" frequencies (in Hz) for processing. This can be specified again later.

peak_freq_range : list or tuple, default=[0.4, 40]
    Two-element list or tuple containing the low and high frequencies (in Hz) that are used to check for HVSR peaks. This can be a tighter range than hvsr_band, but if larger, it will still only use the hvsr_band range.

processing_parameters : dict or filepath, default={}
    If a filepath, it should point to a .proc json file with processing parameters (i.e., an output from sprit.export_settings()). Note that this only applies to parameters for the functions 'fetch_data', 'remove_noise', 'generate_ppsds', 'process_hvsr', 'check_peaks', and 'get_report'. If a dictionary, it should contain nested dictionaries with function names as the keys, and the parameter names/values as key/value pairs for each key. If a function name is not present, or if a parameter name is not present, default values will be used. For example:

    { 'fetch_data' : {'source':'batch', 'data_export_path':"/path/to/trimmed/data", 'data_export_format':'mseed', 'detrend':'spline', 'plot_input_stream':True, 'verbose':False, kwargs:{'kwargskey':'kwargsvalue'}} }

verbose : bool, default=False
    Whether to print output and results to terminal.

Returns:

params : sprit.HVSRData
    sprit.HVSRData class containing input parameters, including the data file path and metadata path. This will be used as an input to other functions. If batch processing, params will be converted to batch type in the fetch_data() step.
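A minimal sketch of a typical call (the filepath and site details are placeholders):

    import sprit

    params = sprit.input_params(
        input_data='/path/to/site1.mseed',   # placeholder filepath
        site='Site 1',
        network='AM', station='RAC84', loc='00',
        acq_date='2023-06-15', starttime='13:00', endtime='14:30',
        tzone='America/Chicago',             # IANA identifier, per above
        xcoord=-88.229, ycoord=40.101, input_crs='EPSG:4326',
        hvsr_band=[0.4, 40])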
sprit.make_it_classy(input_data, verbose=False)[source]
sprit.parse_plot_string(plot_string)[source]
sprit.plot_azimuth(hvsr_data, fig=None, ax=None, show_azimuth_peaks=False, interpolate_azimuths=True, show_azimuth_grid=False, show_plot=True, **plot_azimuth_kwargs)[source]

Function to plot azimuths when azimuths are calculated.

Parameters:

hvsr_data : HVSRData or HVSRBatch
    HVSRData that has gone through at least the sprit.fetch_data() step, and before sprit.generate_ppsds().

show_azimuth_peaks : bool, optional
    Whether to display the peak value at each azimuth calculated on the chart, by default False.

interpolate_azimuths : bool, optional
    Whether to interpolate the azimuth data to get a smoother plot. This is just for visualization and does not change the underlying data. It takes a lot of time to process the data, but interpolation for visualization can happen fairly fast. By default True.

show_azimuth_grid : bool, optional
    Whether to display the grid on the chart, by default False.

Returns:

matplotlib.Figure, matplotlib.Axis
    Figure and axis of the resulting azimuth plot.
sprit.plot_hvsr(hvsr_data, plot_type='HVSR ann p C+ ann p SPEC ann p', azimuth='HV', use_subplots=True, fig=None, ax=None, return_fig=False, plot_engine='matplotlib', save_dir=None, save_suffix='', show_legend=False, show_plot=True, close_figs=False, clear_fig=True, **kwargs)[source]

Function to plot HVSR data.

Parameters:

hvsr_data : dict
    Dictionary containing output from the process_hvsr function.

plot_type : str or list, default='HVSR ann p C+ ann p SPEC ann p'
    The type of plot(s) to plot. If a list, will plot all plots listed.

    'HVSR' - Standard HVSR plot, including standard deviation. Options are included below:

    - 'p' shows a vertical dotted line at the frequency of the "best" peak
    - 'ann' annotates the frequency value of the "best" peak
    - 'all' shows all the peaks identified in check_peaks() (by default, only the max is identified)
    - 't' shows the H/V curve for all time windows
    - 'tp' shows all the peaks from the H/V curves of all the time windows
    - 'fr' shows the window within which SpRIT will search for peak frequencies, as set by peak_freq_range
    - 'test' shows a visualization of the results of the peak validity test(s). Examples:
        - 'tests' visualizes the results of all the peak tests (not the curve tests)
        - 'test12' shows the results of tests 1 and 2; append any number 1-6 after 'test' to show a specific test result visualized

    'COMP' - plot of the PPSD curves for each individual component ("C" also works)

    - '+' (as a suffix in 'C+' or 'COMP+') plots COMP on a plot separate from HVSR (C+ is the default; without +, it will plot on the same plot as HVSR)
    - 'p' shows a vertical dotted line at the frequency of the "best" peak
    - 'ann' annotates the frequency value of the "best" peak
    - 'all' shows all the peaks identified in check_peaks() (by default, only the max is identified)
    - 't' shows the H/V curve for all time windows

    'SPEC' - spectrogram-style plot of the H/V curve over time

    - 'p' shows a horizontal dotted line at the frequency of the "best" peak
    - 'ann' annotates the frequency value of the "best" peak
    - 'all' shows all the peaks identified in check_peaks()
    - 'tp' shows all the peaks of the H/V curve at all time windows

    'AZ' - circular plot of calculated azimuthal H/V curves, similar in style to the SPEC plot

    - 'p' shows a point at each calculated (not interpolated) azimuth peak
    - 'g' shows grid lines at various angles
    - 'i' interpolates so that there is an interpolated azimuth at each degree interval (1 degree step). This is the default, so usually 'i' is not needed.
    - '-i' prohibits interpolation (only shows the calculated azimuths, as determined by azimuth_angle (default = 30))

azimuth : str, default='HV'
    What 'azimuth' to plot, the default being the standard N and E components combined.

use_subplots : bool, default=True
    Whether to output the plots as subplots (True) or as separate plots (False).

fig : matplotlib.Figure, default=None
    If not None, the matplotlib figure on which the plot is plotted.

ax : matplotlib.Axis, default=None
    If not None, the matplotlib axis on which the plot is plotted.

return_fig : bool
    Whether to return figure and axis objects.

plot_engine : str, default='matplotlib'
    Which engine to use for plotting. Both "matplotlib" and "plotly" are acceptable. For shorthand, 'mpl' and 'm' also work for matplotlib; 'plty' or 'p' also work for plotly. Not case sensitive.

save_dir : str or None
    Directory in which to save figures.

save_suffix : str
    Suffix to add to the end of figure filename(s), if save_dir is used.

show_legend : bool, default=False
    Whether to show the legend in the plot.

show_plot : bool
    Whether to show the plot.

close_figs : bool, default=False
    Whether to close figures before plotting.

clear_fig : bool, default=True
    Whether to clear figures before plotting.

**kwargs : keyword arguments
    Keyword arguments for matplotlib.pyplot.

Returns:

fig, ax : matplotlib figure and axis objects
    Returns figure and axis matplotlib.pyplot objects if return_fig=True; otherwise, simply plots the figures.
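A minimal sketch of composing a plot string from the options above, using the sample data:

    import sprit

    hv_data = sprit.run(input_data='sample1')
    # HVSR panel with best-peak line + annotation, components on their own
    # axis (C+), and the SPEC spectrogram-style panel
    fig, ax = sprit.plot_hvsr(hv_data,
                              plot_type='HVSR p ann C+ p SPEC p ann',
                              return_fig=True)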
sprit.plot_outlier_curves(hvsr_data, plot_engine='plotly', rmse_thresh=0.98, use_percentile=True, use_hv_curve=False, from_roc=False, show_plot=True, verbose=False)[source]
sprit.plot_preview(hv_data, stream=None, preview_fig=None, spectrogram_component='Z', show_plot=True, return_fig=False)[source]
sprit.plot_results(hv_data, plot_string='HVSR p ann C+ p SPEC ann', results_fig=None, results_graph_widget=None, return_fig=False, show_results_plot=True)[source]
sprit.process_hvsr(hvsr_data, horizontal_method=None, smooth=True, freq_smooth='konno ohmachi', f_smooth_width=40, resample=True, outlier_curve_rmse_percentile=False, azimuth=None, verbose=False)[source]

Process the input data and get HVSR data.

This is the main function that uses other (private) functions to do the bulk of the processing of the HVSR data and the data quality checks.

Parameters:

hvsr_data : HVSRData or HVSRBatch
    Data object containing all the parameters input and generated by the user (usually during sprit.input_params(), sprit.fetch_data(), sprit.generate_ppsds() and/or sprit.remove_noise()).

horizontal_method : int or str, default=3
    Method to use for combining the horizontal components. Default is 3) Geometric Mean.

    0) (not used)
    1) 'Diffuse field assumption'   H = √( (eie_E + eie_N) / eie_Z ), eie = equal interval energy
    2) 'Arithmetic Mean'            H ≡ (HN + HE)/2
    3) 'Geometric Mean'             H ≡ √(HN · HE), recommended by the SESAME project (2004)
    4) 'Vector Summation'           H ≡ √(HN² + HE²)
    5) 'Quadratic Mean'             H ≡ √((HN² + HE²)/2)
    6) 'Maximum Horizontal Value'   H ≡ max {HN, HE}
    7) 'Minimum Horizontal Value'   H ≡ min {HN, HE}
    8) 'Single Azimuth'             H = H2·cos(az) + H1·sin(az)

smooth : bool, default=True
    A bool or int may be used. If True, defaults to smoothing the H/V curve using a Savitzky-Golay filter with a window length of 51 (works well with the default resample of 1000 points). If an int, the length of the window in the Savitzky-Golay filter.

freq_smooth : str {'konno ohmachi', 'constant', 'proportional'}
    Which frequency smoothing method to use. By default, uses the 'konno ohmachi' method. See here for more information: https://www.geopsy.org/documentation/geopsy/hv-processing.html

f_smooth_width : int, default=40
    - For 'konno ohmachi': passed directly to the bandwidth parameter of the konno_ohmachi_smoothing() function; determines the width of the smoothing peak, with lower values resulting in a broader peak. Must be > 0.
    - For 'constant': the size of a triangular smoothing window in the number of frequency steps.
    - For 'proportional': the size of a triangular smoothing window in percentage of the number of frequency steps (e.g., if 1000 frequency steps/bins and f_smooth_width=40, the window would be 400 steps wide).

resample : bool, default=True
    A bool or int may be used. If True, defaults to resampling the H/V data to include 1000 frequency values for the rest of the analysis. If an int, the number of data points to interpolate/resample/smooth the component psd/HV curve data to.

outlier_curve_rmse_percentile : bool or float, default=False
    If False, outlier curve removal is not carried out here. If True, defaults to 98 (98th percentile). Otherwise, a float of the percentile used as the rmse_thresh of remove_outlier_curves().

azimuth : float, default=None
    The azimuth angle to use when the method is single azimuth.

verbose : bool, default=False
    Whether to print output to terminal.

Returns:

hvsr_out : dict
    Dictionary containing all the information about the data, including input parameters.
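A minimal sketch of the standard processing chain up to this step, choosing the vector summation method (4 in the list above); the mseed path is a placeholder:

    import sprit

    params = sprit.input_params(input_data='/path/to/data.mseed')  # placeholder
    hv_data = sprit.fetch_data(params)
    hv_data = sprit.generate_ppsds(hv_data)
    hv_data = sprit.process_hvsr(hv_data, horizontal_method=4,
                                 freq_smooth='konno ohmachi',
                                 f_smooth_width=40, resample=1000)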
sprit.read_from_RS(dest, src='SHAKENAME@HOSTNAME:/opt/data/archive/YEAR/AM/STATION/', opts='az', username='myshake', password='shakeme', hostname='rs.local', year='2023', sta='RAC84', sleep_time=0.1, verbose=True, save_progress=True, method='scp')[source]
sprit.read_tromino_files(input_data, params, struct_format='H', sampling_rate=128, start_byte=24576, verbose=False, **kwargs)[source]

Function to read data from a Tromino. Specifically, this has been lightly tested on Tromino 3G+ machines.

Parameters:

input_data : str, pathlib.Path()
    The input parameter _datapath_ from sprit.input_params().

params : HVSRData or HVSRBatch
    The parameters as read in from input_params() and fetch_data().

verbose : bool, optional
    Whether to print results to terminal, by default False.

Returns:

obspy.Stream
    An obspy.Stream object containing the trace data from the Tromino instrument.
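A minimal sketch (the Tromino filepath is a placeholder; struct_format, sampling_rate, and start_byte are simply the documented defaults):

    import sprit

    trm_file = '/path/to/tromino_data.trc'   # placeholder filepath
    params = sprit.input_params(input_data=trm_file, instrument='Tromino')
    stream = sprit.read_tromino_files(trm_file, params,
                                      struct_format='H',
                                      sampling_rate=128,
                                      start_byte=24576)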
sprit.remove_noise(hvsr_data, remove_method=None, processing_window=None, sat_percent=0.995, noise_percent=0.8, sta=2, lta=30, stalta_thresh=[8, 16], warmup_time=0, cooldown_time=0, min_win_size=1, remove_raw_noise=False, show_stalta_plot=False, verbose=False)[source]

Function to remove noisy windows from data, using various methods.

Methods include:

- Manual window selection (by clicking on a chart with spectrogram and stream data)
- Auto window selection, which does the following in sequence (these can also be done independently):
    - A sta/lta "antitrigger" method (using sta/lta values to automatically remove triggered windows where there appears to be too much noise)
    - A noise threshold method, that cuts off all times where the noise threshold equals more than (by default) 80% of the highest amplitude noise sample for the length specified by lta (in seconds)
    - A saturation threshold method, that cuts off all times where the noise threshold equals more than (by default) 99.5% of the highest amplitude noise sample

Parameters:

hvsr_data : dict, obspy.Stream, or obspy.Trace
    Dictionary containing all the data and parameters for the HVSR analysis.

remove_method : str, {'auto', 'manual', 'stalta'/'antitrigger', 'saturation threshold', 'noise threshold', 'warmup'/'cooldown'/'buffer'/'warm_cool'}
    The different methods for removing noise from the dataset. A list of strings will also work, in which case it should be a list of the above strings. See the descriptions above for how each method works. By default 'auto'. If remove_method='auto', this is the equivalent of remove_method=['noise threshold', 'antitrigger', 'saturation threshold', 'warm_cool'].

processing_window : list, tuple, or None
    A list/tuple of two items [s, e] or a list/tuple of two-item lists/tuples [[s0, e0], [s1, e1],...[sn, en]] with start and end time(s) for windows to keep for processing. Data outside of these times will be excluded from processing. Times should be obspy.UTCDateTime objects to ensure precision, but time strings ("13:05") will also work in most cases (exceptions may be when the data stream starts/ends on different UTC days).

sat_percent : float, default=0.995
    Percentage (between 0 and 1) to use as the threshold at which to remove data. This is used in the saturation method. By default 0.995. If a value greater than 1 is passed, it will be divided by 100 to obtain the percentage.

noise_percent : float, default=0.8
    Percentage (between 0 and 1) to use as the threshold at which to remove data, if it persists for longer than the time (in seconds) specified by min_win_size. This is used in the noise threshold method. By default 0.8. If a value greater than 1 is passed, it will be divided by 100 to obtain the percentage.

sta : int, optional
    Short term average (STA) window (in seconds), by default 2. For use with the sta/lta antitrigger method.

lta : int, optional
    Long term average (LTA) window (in seconds), by default 30. For use with the sta/lta antitrigger method.

stalta_thresh : list, default=[8, 16]
    Two-item list or tuple with the thresholds for the sta/lta antitrigger. The first value (index [0]) is the lower threshold, the second value (index [1]) is the upper threshold. By default [8, 16].

warmup_time : int, default=0
    Time in seconds to allow for warmup of the instrument (or while the operator is still near the instrument). This will remove any data before this time, by default 0.

cooldown_time : int, default=0
    Time in seconds to allow for cooldown of the instrument (or for when the operator is nearing the instrument). This will remove any data after this time, by default 0.

min_win_size : float, default=1
    The minimum length of time (in seconds) a window must be over the specified threshold for it to be removed.

remove_raw_noise : bool, default=False
    If remove_raw_noise=True, will perform the operation on the raw data ('input_stream'), rather than the potentially already-modified data ('stream').

verbose : bool, default=False
    Whether to print the status of remove_noise.

Returns:

output : dict
    Dictionary similar to hvsr_data, but containing modified data with 'noise' removed.
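A minimal sketch applying only two of the methods listed above (the mseed path is a placeholder):

    import sprit

    params = sprit.input_params(input_data='/path/to/data.mseed')  # placeholder
    hv_data = sprit.fetch_data(params)
    hv_data = sprit.remove_noise(hv_data,
                                 remove_method=['antitrigger', 'warm_cool'],
                                 sta=2, lta=30, stalta_thresh=[8, 16],
                                 warmup_time=120, cooldown_time=60)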
sprit.remove_outlier_curves(hvsr_data, rmse_thresh=98, use_percentile=True, use_hv_curve=False, plot_engine='matplotlib', show_plot=False, verbose=False)[source]

Function used to remove outlier curves, using Root Mean Square Error (RMSE) to calculate the error of each windowed Probabilistic Power Spectral Density (PPSD) curve against the median PPSD value at each frequency step for all times. It calculates the RMSE for the PPSD curves of each component individually. All curves above the threshold are removed from analysis.

Some aberrant curves often occur due to the remove_noise() function, so this should be run some time after remove_noise(). In general, the recommended workflow is to run this immediately following the generate_ppsds() function.

Parameters:

hvsr_data : dict
    Input dictionary containing all the values and parameters of interest.

rmse_thresh : float or int, default=98
    The Root Mean Square Error value to use as a threshold for determining whether a curve is an outlier. This averages over each individual entire curve so that curves with very aberrant data (which often occurs when using the remove_noise() method) can be identified. Otherwise, specify a float or integer to use as the cutoff RMSE value (all curves with RMSE above this will be removed).

use_percentile : float, default=True
    Whether rmse_thresh should be interpreted as a raw RMSE value or as a percentile of the RMSE values.

use_hv_curve : bool, default=False
    Whether to use the calculated H/V curve or the individual components. This can only be True after process_hvsr() has been run.

show_plot : bool, default=False
    Whether to show a plot of the removed data.

verbose : bool, default=False
    Whether to print the output of the function to terminal.

Returns:

hvsr_data : dict
    Input dictionary with values modified based on the work of this function.
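A minimal sketch of the recommended placement, immediately after generate_ppsds() (the mseed path is a placeholder):

    import sprit

    params = sprit.input_params(input_data='/path/to/data.mseed')  # placeholder
    hv_data = sprit.fetch_data(params)
    hv_data = sprit.generate_ppsds(hv_data)
    # Remove curves whose RMSE falls above the 98th percentile
    hv_data = sprit.remove_outlier_curves(hv_data, rmse_thresh=98,
                                          use_percentile=True, show_plot=True)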
-sprit.run(input_data, source='file', azimuth_calculation=False, noise_removal=False, outlier_curves_removal=False, verbose=False, **kwargs)[source]
-

The sprit.run() is the main function that allows you to do all your HVSR processing in one simple step (sprit.run() is how you would call it in your code, but it may also be called using sprit.sprit_hvsr.run())

-

The input_data parameter of sprit.run() is the only required parameter. This can be either a single file, a list of files (one for each component, for example), a directory (in which case, all obspy-readable files will be added to an HVSRBatch instance), a Rasp. Shake raw data directory, or sample data.

-
-
Parameters:
-
-
input_datastr or filepath object that can be read by obspy

Filepath to data to be processed. This may be a file or directory, depending on what kind of data is being processed (this can be specified with the source parameter). -For sample data, The following can be specified as the input_data parameter:

-
-
    -
  • Any integer 1-6 (inclusive), or the string (e.g., input_data=”1” or input_data=1 will work)

  • -
  • The word “sample” before any integer (e.g., input_data=”sample1”)

  • -
  • The word “sample” will default to “sample1” if source=’file’.

  • -
  • If source=’batch’, input_data should be input_data=’sample’ or input_data=’batch’. In this case, it will read and process all the sample files using the HVSRBatch class. Set verbose=True to see all the information in the sample batch csv file.

  • -
-
-
-
sourcestr, optional

_description_, by default ‘file’

-
-
azimuth_calculationbool, optional

Whether to perform azimuthal analysis, by default False.

-
-
noise_removalbool, default=False

Whether to remove noise (before processing PPSDs)

-
-
outlier_curves_removalbool, default=False

Whether to remove outlier curves from HVSR time windows

-
-
show_plotbool, default=True

Whether to show plots. This does not affect whether the plots are created (and then inserted as an attribute of HVSRData), only whether they are shown.

-
-
verbosebool, optional

_description_, by default False

-
-
**kwargs

Keyword arguments for the functions listed above. The keyword arguments are unique, so they will get parsed out and passed into the appropriate function.

-
-
input_paramsfunction name (not an actual parameter)

Function for designating input parameters for reading in and processing data -See API documentation: [input_params()](https://sprit.readthedocs.io/en/latest/sprit.html#sprit.input_params)

-
-
input_data: any, default = ‘<no default>’

See API documentation at link above or at help(sprit.input_params) for specifics.

-
-
site: any, default = ‘HVSR Site’

See API documentation at link above or at help(sprit.input_params) for specifics.

-
-
id_prefix: any, default = None

See API documentation at link above or at help(input_params) for specifics.

-
-
network: any, default = ‘AM’

See API documentation at link above or at help(sprit.input_params) for specifics.

-
-
station: any, default = ‘RAC84’

See API documentation at link above or at help(sprit.input_params) for specifics.

-
-
loc: any, default = ‘00’

See API documentation at link above or at help(sprit.input_params) for specifics.

-
-
channels: any, default = [‘EHZ’, ‘EHN’, ‘EHE’]

See API documentation at link above or at help(input_params) for specifics.

-
-
acq_date: any, default = ‘2024-10-30’

See API documentation at link above or at help(sprit.input_params) for specifics.

-
-
starttime: any, default = 2024-10-30T00:00:00.000000Z

See API documentation at link above or at help(input_params) for specifics.

-
-
endtime: any, default = 2024-10-30T23:59:59.999999Z

See API documentation at link above or at help(input_params) for specifics.

-
-
tzone: any, default = ‘UTC’

See API documentation at link above or at help(sprit.input_params) for specifics.

-
-
xcoord: any, default = -88.2290526

See API documentation at link above or at help(input_params) for specifics.

-
-
ycoord: any, default = 40.1012122

See API documentation at link above or at help(input_params) for specifics.

-
-
elevation: any, default = 755

See API documentation at link above or at help(input_params) for specifics.

-
-
input_crs: any, default = None

See API documentation at link above or at help(input_params) for specifics.

-
-
output_crs: any, default = None

See API documentation at link above or at help(input_params) for specifics.

-
-
elev_unit: any, default = ‘meters’

See API documentation at link above or at help(sprit.input_params) for specifics.

-
-
depth: any, default = 0

See API documentation at link above or at help(input_params) for specifics.

-
-
instrument: any, default = ‘Raspberry Shake’

See API documentation at link above or at help(sprit.input_params) for specifics.

-
-
metapath: any, default = None

See API documentation at link above or at help(input_params) for specifics.

-
-
hvsr_band: any, default = [0.4, 40]

See API documentation at link above or at help(input_params) for specifics.

-
-
peak_freq_range: any, default = [0.4, 40]

See API documentation at link above or at help(input_params) for specifics.

-
-
processing_parameters: any, default = {}

See API documentation at link above or at help(input_params) for specifics.

-
-
verbose: any, default = False

See API documentation at link above or at help(input_params) for specifics.

-
-
fetch_datafunction name (not an actual parameter)

Fetch ambient seismic data from a source to read into obspy stream -See API documentation: [fetch_data()](https://sprit.readthedocs.io/en/latest/sprit.html#sprit.fetch_data)

-
-
params: any, default = ‘<output of previous function>’

See API documentation at link above or at help(sprit.fetch_data) for specifics.

-
-
source: any, default = ‘file’

See API documentation at link above or at help(sprit.fetch_data) for specifics.

-
-
data_export_path: any, default = None

See API documentation at link above or at help(fetch_data) for specifics.

-
-
data_export_format: any, default = ‘mseed’

See API documentation at link above or at help(sprit.fetch_data) for specifics.

-
-
detrend: any, default = ‘spline’

See API documentation at link above or at help(sprit.fetch_data) for specifics.

-
-
detrend_order: any, default = 2

See API documentation at link above or at help(fetch_data) for specifics.

-
-
update_metadata: any, default = True

See API documentation at link above or at help(fetch_data) for specifics.

-
-
plot_input_stream: any, default = False

See API documentation at link above or at help(fetch_data) for specifics.

-
-
plot_engine: any, default = ‘matplotlib’

See API documentation at link above or at help(sprit.fetch_data) for specifics.

-
-
show_plot: any, default = True

See API documentation at link above or at help(fetch_data) for specifics.

-
-
verbose: any, default = False

See API documentation at link above or at help(fetch_data) for specifics.

-
-
kwargs: any, default = {}

See API documentation at link above or at help(fetch_data) for specifics.

-
-
calculate_azimuth: function name (not an actual parameter)

Function to calculate azimuthal horizontal component at specified angle(s). Adds each new horizontal component as a radial component to the obspy.Stream object at hvsr_data['stream']. See API documentation: [calculate_azimuth()](https://sprit.readthedocs.io/en/latest/sprit.html#sprit.calculate_azimuth)

-
-
hvsr_data: any, default = ‘<output of previous function>’

See API documentation at link above or at help(sprit.calculate_azimuth) for specifics.

-
-
azimuth_angle: any, default = 30

See API documentation at link above or at help(calculate_azimuth) for specifics.

-
-
azimuth_type: any, default = ‘multiple’

See API documentation at link above or at help(sprit.calculate_azimuth) for specifics.

-
-
azimuth_unit: any, default = ‘degrees’

See API documentation at link above or at help(sprit.calculate_azimuth) for specifics.

-
-
show_az_plot: any, default = False

See API documentation at link above or at help(calculate_azimuth) for specifics.

-
-
verbose: any, default = False

See API documentation at link above or at help(calculate_azimuth) for specifics.

-
-
plot_azimuth_kwargs: any, default = {}

See API documentation at link above or at help(calculate_azimuth) for specifics.

-
-
remove_noise: function name (not an actual parameter)

Function to remove noisy windows from data, using various methods. See API documentation: [remove_noise()](https://sprit.readthedocs.io/en/latest/sprit.html#sprit.remove_noise)

-
-
hvsr_data: any, default = ‘<output of previous function>’

See API documentation at link above or at help(sprit.remove_noise) for specifics.

-
-
remove_method: any, default = None

See API documentation at link above or at help(remove_noise) for specifics.

-
-
processing_window: any, default = None

See API documentation at link above or at help(remove_noise) for specifics.

-
-
sat_percent: any, default = 0.995

See API documentation at link above or at help(remove_noise) for specifics.

-
-
noise_percent: any, default = 0.8

See API documentation at link above or at help(remove_noise) for specifics.

-
-
sta: any, default = 2

See API documentation at link above or at help(remove_noise) for specifics.

-
-
lta: any, default = 30

See API documentation at link above or at help(remove_noise) for specifics.

-
-
stalta_thresh: any, default = [8, 16]

See API documentation at link above or at help(remove_noise) for specifics.

-
-
warmup_time: any, default = 0

See API documentation at link above or at help(remove_noise) for specifics.

-
-
cooldown_time: any, default = 0

See API documentation at link above or at help(remove_noise) for specifics.

-
-
min_win_size: any, default = 1

See API documentation at link above or at help(remove_noise) for specifics.

-
-
remove_raw_noise: any, default = False

See API documentation at link above or at help(remove_noise) for specifics.

-
-
show_stalta_plot: any, default = False

See API documentation at link above or at help(remove_noise) for specifics.

-
-
verbose: any, default = False

See API documentation at link above or at help(remove_noise) for specifics.

-
-
generate_ppsds: function name (not an actual parameter)

Generates PPSDs for each channel. See API documentation: [generate_ppsds()](https://sprit.readthedocs.io/en/latest/sprit.html#sprit.generate_ppsds)

-
-
hvsr_data: any, default = ‘<output of previous function>’

See API documentation at link above or at help(sprit.generate_ppsds) for specifics.

-
-
azimuthal_ppsds: any, default = False

See API documentation at link above or at help(generate_ppsds) for specifics.

-
-
verbose: any, default = False

See API documentation at link above or at help(generate_ppsds) for specifics.

-
-
ppsd_kwargs: any, default = {}

See API documentation at link above or at help(generate_ppsds) for specifics.

-
-
process_hvsr: function name (not an actual parameter)

Process the input data and get HVSR data. See API documentation: [process_hvsr()](https://sprit.readthedocs.io/en/latest/sprit.html#sprit.process_hvsr)

-
-
hvsr_data: any, default = ‘<output of previous function>’

See API documentation at link above or at help(sprit.process_hvsr) for specifics.

-
-
horizontal_method: any, default = None

See API documentation at link above or at help(process_hvsr) for specifics.

-
-
smooth: any, default = True

See API documentation at link above or at help(process_hvsr) for specifics.

-
-
freq_smooth: any, default = ‘konno ohmachi’

See API documentation at link above or at help(sprit.process_hvsr) for specifics.

-
-
f_smooth_width: any, default = 40

See API documentation at link above or at help(process_hvsr) for specifics.

-
-
resample: any, default = True

See API documentation at link above or at help(process_hvsr) for specifics.

-
-
outlier_curve_rmse_percentile: any, default = False

See API documentation at link above or at help(process_hvsr) for specifics.

-
-
azimuth: any, default = None

See API documentation at link above or at help(process_hvsr) for specifics.

-
-
verbose: any, default = False

See API documentation at link above or at help(process_hvsr) for specifics.

-
-
remove_outlier_curves: function name (not an actual parameter)

Function used to remove outlier curves, using Root Mean Square Error to calculate the error of each curve. See API documentation: [remove_outlier_curves()](https://sprit.readthedocs.io/en/latest/sprit.html#sprit.remove_outlier_curves)

-
-
hvsr_data: any, default = ‘<output of previous function>’

See API documentation at link above or at help(sprit.remove_outlier_curves) for specifics.

-
-
rmse_thresh: any, default = 98

See API documentation at link above or at help(remove_outlier_curves) for specifics.

-
-
use_percentile: any, default = True

See API documentation at link above or at help(remove_outlier_curves) for specifics.

-
-
use_hv_curve: any, default = False

See API documentation at link above or at help(remove_outlier_curves) for specifics.

-
-
plot_engine: any, default = ‘matplotlib’

See API documentation at link above or at help(sprit.remove_outlier_curves) for specifics.

-
-
show_plot: any, default = False

See API documentation at link above or at help(remove_outlier_curves) for specifics.

-
-
verbose: any, default = False

See API documentation at link above or at help(remove_outlier_curves) for specifics.

-
-
check_peaks: function name (not an actual parameter)

Function to run tests on HVSR peaks to find the best one and see if it passes quality checks. See API documentation: [check_peaks()](https://sprit.readthedocs.io/en/latest/sprit.html#sprit.check_peaks)

-
-
hvsr_data: any, default = ‘<output of previous function>’

See API documentation at link above or at help(sprit.check_peaks) for specifics.

-
-
hvsr_band: any, default = [0.4, 40]

See API documentation at link above or at help(check_peaks) for specifics.

-
-
peak_selection: any, default = ‘max’

See API documentation at link above or at help(sprit.check_peaks) for specifics.

-
-
peak_freq_range: any, default = [0.4, 40]

See API documentation at link above or at help(check_peaks) for specifics.

-
-
azimuth: any, default = ‘HV’

See API documentation at link above or at help(sprit.check_peaks) for specifics.

-
-
verbose: any, default = False

See API documentation at link above or at help(check_peaks) for specifics.

-
-
get_report: function name (not an actual parameter)

Generate and/or print and/or export a report of the HVSR analysis in a variety of formats. See API documentation: [get_report()](https://sprit.readthedocs.io/en/latest/sprit.html#sprit.get_report)

-
-
hvsr_results: any, default = ‘<output of previous function>’

See API documentation at link above or at help(sprit.get_report) for specifics.

-
-
report_formats: any, default = [‘print’, ‘table’, ‘plot’, ‘html’, ‘pdf’]

See API documentation at link above or at help(get_report) for specifics.

-
-
azimuth: any, default = ‘HV’

See API documentation at link above or at help(sprit.get_report) for specifics.

-
-
plot_type: any, default = ‘HVSR p ann C+ p ann Spec p ann’

See API documentation at link above or at help(sprit.get_report) for specifics.

-
-
plot_engine: any, default = ‘matplotlib’

See API documentation at link above or at help(sprit.get_report) for specifics.

-
-
show_print_report: any, default = True

See API documentation at link above or at help(get_report) for specifics.

-
-
show_table_report: any, default = False

See API documentation at link above or at help(get_report) for specifics.

-
-
show_plot_report: any, default = True

See API documentation at link above or at help(get_report) for specifics.

-
-
show_html_report: any, default = False

See API documentation at link above or at help(get_report) for specifics.

-
-
show_pdf_report: any, default = True

See API documentation at link above or at help(get_report) for specifics.

-
-
suppress_report_outputs: any, default = False

See API documentation at link above or at help(get_report) for specifics.

-
-
show_report_outputs: any, default = False

See API documentation at link above or at help(get_report) for specifics.

-
-
csv_handling: any, default = ‘append’

See API documentation at link above or at help(sprit.get_report) for specifics.

-
-
report_export_format: any, default = None

See API documentation at link above or at help(get_report) for specifics.

-
-
report_export_path: any, default = None

See API documentation at link above or at help(get_report) for specifics.

-
-
verbose: any, default = False

See API documentation at link above or at help(get_report) for specifics.

-
-
kwargs: any, default = {}

See API documentation at link above or at help(get_report) for specifics.

-
-
export_data: function name (not an actual parameter)

Export data into pickle format that can be read back in using import_data() so data does not need to be processed each time. See API documentation: [export_data()](https://sprit.readthedocs.io/en/latest/sprit.html#sprit.export_data)

-
-
hvsr_data: any, default = ‘<output of previous function>’

See API documentation at link above or at help(sprit.export_data) for specifics.

-
-
hvsr_export_path: any, default = None

See API documentation at link above or at help(export_data) for specifics.

-
-
ext: any, default = ‘hvsr’

See API documentation at link above or at help(sprit.export_data) for specifics.

-
-
verbose: any, default = False

See API documentation at link above or at help(export_data) for specifics.

-
-
-
-
Returns:
-
-
hvsr_results : sprit.HVSRData or sprit.HVSRBatch object

If a single file/data point is being processed, an HVSRData object will be returned. Otherwise, it will be an HVSRBatch object. See their documentation for more information.

-
-
-
-
Raises:
-
-
RuntimeError

If the input parameters cannot be read correctly. This is raised if the input_params() function fails, since no other data reading or processing steps can be carried out correctly.

-
-
RuntimeError

If the data is not read/fetched correctly using fetch_data(), an error will be raised, since no other data processing steps can be carried out correctly.

-
-
RuntimeError

If the data being processed is a single file, an error will be raised if generate_ppsds() does not work correctly. No errors are raised for remove_noise() (since that is an optional step) or for process_hvsr() (since that is the last processing step).

-
-
-
-
-

Notes

-

The sprit.run() function calls the following functions. This is the recommended order/set of functions to run to process HVSR using SpRIT. See the API documentation for these functions for more information (a minimal pipeline sketch is shown after this list):

- input_params(): The input_data parameter of input_params() is the only required variable, though others may also need to be set for your data to process correctly.
- fetch_data(): the source parameter of fetch_data() is the only explicit variable in the sprit.run() function aside from input_data and verbose. Everything else gets delivered to the correct function via the kwargs dictionary.
- remove_noise(): by default, the kind of noise removal is remove_method='auto'. See the remove_noise() documentation for more information. If remove_method is set to anything other than one of the explicit options in remove_noise, noise removal will not be carried out.
- generate_ppsds(): generates PPSDs for each component, which will be combined/used later. Any parameter of obspy.signal.spectral_estimation.PPSD() may also be read into this function.
- remove_outlier_curves(): removes any outlier PPSD curves so that the data quality when curves are combined will be enhanced. See the remove_outlier_curves() documentation for more information.
- process_hvsr(): this is the main function processing the HVSR curve and statistics. See the process_hvsr() documentation for more details. The hvsr_band parameter sets the frequency spectrum over which these calculations occur.
- check_peaks(): this is the main function that will find and 'score' peaks to get a best peak. The parameter peak_freq_range can be set to limit the frequencies within which peaks are checked and scored.
- get_report(): this is the main function that will print, plot, and/or save the results of the data. See the get_report() API documentation for more information.
- export_data(): this function exports the final data output as a pickle file (by default, this pickle object has a .hvsr extension). This can be used to read data back into SpRIT without having to reprocess data.
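For orientation, here is a minimal sketch of that pipeline using the documented defaults (the file path, site name, and export directory are placeholders; sprit.run() performs the equivalent steps internally):

```python
import sprit

# Designate input parameters (input_data is the only required argument)
params = sprit.input_params(
    input_data="/path/to/record.mseed",  # hypothetical file
    site="HVSR Site",
    hvsr_band=[0.4, 40],
    peak_freq_range=[0.4, 40],
)

# Read the data into an obspy stream attached to the HVSRData object
hvsr_data = sprit.fetch_data(params, source="file", detrend="spline")

# Optional noise removal, then PPSDs and outlier-curve removal
hvsr_data = sprit.remove_noise(hvsr_data, remove_method="auto")
hvsr_data = sprit.generate_ppsds(hvsr_data)
hvsr_data = sprit.remove_outlier_curves(hvsr_data, rmse_thresh=98)

# Main HVSR processing, peak checks, and reporting
hvsr_results = sprit.process_hvsr(hvsr_data)
hvsr_results = sprit.check_peaks(hvsr_results, peak_freq_range=[0.4, 40])
hvsr_results = sprit.get_report(hvsr_results, report_formats=["print", "plot"])

# Save a pickled .hvsr file so the data need not be reprocessed
sprit.export_data(hvsr_results, hvsr_export_path="/path/to/output")  # hypothetical directory
```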

-
- -
-
-sprit.time_it(_t, proc_name='', verbose=True)[source]
-

Computes elapsed time since the last call.

-
- -
-
-sprit.x_mark(incolor=False, inTerminal=False)[source]
-

The default Windows terminal is not able to display the check mark character correctly. This function returns another displayable character if the platform is Windows.

-
- -
-

Submodules

-
- -
-
-
- - -
-
- -
-
-
-
\ No newline at end of file
diff --git a/docs/_build/html/sprit.sprit_cli.html b/docs/_build/html/sprit.sprit_cli.html
deleted file mode 100644
index 03e46fa3..00000000
--- a/docs/_build/html/sprit.sprit_cli.html
+++ /dev/null
@@ -1,183 +0,0 @@
- - -
- -
-
-
- -
-
-
-
- -
-

sprit.sprit_cli module

-

This module/script is used to run sprit from the command line.

-

The arguments here should correspond to any of the keyword arguments that can be used with sprit.run() (or sprit_hvsr.run()). See the run() function’s documentation for more information, or the individual functions that are run within it.

-

For list inputs, you should pass the argument multiple times (e.g., --report_format "csv" --report_format "print" --report_format "plot"). (In the case of --report_format, you can also just use "all" to get csv, print, and plot report types.)

-

The input_data parameter of input_params() is the only required argument, though for your data processing to work correctly and to be formatted correctly, you may need to pass others as well.

-
-
-sprit.sprit_cli.get_param_docstring(func, param_name)[source]
-
- -
-
-sprit.sprit_cli.main()[source]
-
- -
- - -
-
- -
-
-
-
\ No newline at end of file
diff --git a/docs/_build/html/sprit.sprit_hvsr.html b/docs/_build/html/sprit.sprit_hvsr.html
deleted file mode 100644
index ce967a69..00000000
--- a/docs/_build/html/sprit.sprit_hvsr.html
+++ /dev/null
@@ -1,1869 +0,0 @@
- - -
- -
-
-
- -
-
-
-
- -
-

sprit.sprit_hvsr module

-

This module is the main SpRIT module that contains all the functions needed to run HVSR analysis.

-

The functions defined here are read both by the SpRIT graphical user interface and by the command-line interface to run HVSR analysis on input data.

-

See documentation for individual functions for more information.

-
-
-class sprit.sprit_hvsr.HVSRBatch(*args, **kwargs)[source]
-

Bases: object

-

HVSRBatch is the data container used for batch processing. It contains several HVSRData objects (one for each site). These can be accessed using their site name, either with square brackets (HVSRBatchVariable["SiteName"]) or the dot accessor (HVSRBatchVariable.SiteName).

-

The dot accessor may not work if there is a space in the site name.

-

All of the functions in the sprit package are designed to perform the bulk of their operations iteratively on the individual HVSRData objects contained in the HVSRBatch object, and do little with the HVSRBatch object itself, besides using it to determine which sites are contained within it.
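As a brief illustration (a minimal sketch; the csv path and site names are hypothetical):

```python
import sprit

# Reading several records at once returns an HVSRBatch object
params = sprit.input_params("/path/to/batch.csv")  # hypothetical batch file
batch = sprit.fetch_data(params, source="batch")

# Access one site's HVSRData by name (bracket or dot accessor)
site_data = batch["SiteName"]  # always works
site_data = batch.SiteName     # may fail if the site name contains a space

# Iterate over all sites, dict-style
for site_name, hvsr_data in batch.items():
    print(site_name)
```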

-

Methods

- - - - - - - - - - - - - - - - - - - - - - - - - - - -

copy([type])

Make a copy of the HVSRBatch object.

export([hvsr_export_path, ext])

Method to export HVSRData objects in HVSRBatch container to individual .hvsr pickle files.

export_settings([site_name, ...])

Method to export settings from HVSRData object in HVSRBatch object.

get_report(**kwargs)

Method to get report from processed data, in print, graphical, or tabular format.

items()

Method to return both the site names and the HVSRData object as a set of dict_items tuples.

keys()

Method to return the "keys" of the HVSRBatch object.

plot(**kwargs)

Method to plot data, based on the sprit.plot_hvsr() function.

report(**kwargs)

Wrapper of get_report()

-
-
-copy(type='shallow')[source]
-

Make a copy of the HVSRBatch object. Uses python copy module.

-
-
Parameters:
-
-
type : str {'shallow', 'deep'}

Based on input, creates either a shallow or deep copy of the HVSRBatch object. Shallow is equivalent to copy.copy(). Input of 'deep' is equivalent to copy.deepcopy() (still experimental). Defaults to shallow.

-
-
-
-
-
- -
-
-export(hvsr_export_path=True, ext='hvsr')[source]
-

Method to export HVSRData objects in HVSRBatch container to individual .hvsr pickle files.

-
-
Parameters:
-
-
hvsr_export_path : filepath, default=True

Filepath for saving the file. This can be a directory (in which case a filename will be assigned based on the HVSRData attributes). If True (the default), it will first try to save each file to the same directory as input_data, then if that does not work, to the current working directory, then to the user's home directory.

-
-
ext : str, optional

The extension to use for the output, by default ‘hvsr’. This is still a pickle file that can be read with pickle.load(), but will have .hvsr extension.

-
-
-
-
-
- -
-
-export_settings(site_name=None, export_settings_path='default', export_settings_type='all', include_location=False, verbose=True)[source]
-

Method to export settings from HVSRData object in HVSRBatch object.

-

Simply calls sprit.export_settings() from specified HVSRData object in the HVSRBatch object. -See sprit.export_settings() for more details.

-
-
Parameters:
-
-
site_name : str, default=None

The name of the site whose settings should be exported. If None, will default to the first site, by default None.

-
-
export_settings_path : str, optional

Filepath to output file. If left as ‘default’, will save as the default value in the resources directory. If that is not possible, will save to home directory, by default ‘default’

-
-
export_settings_type : str, {'all', 'instrument', 'processing'}, optional

The type of settings to save, by default 'all'

-
-
include_location : bool, optional

Whether to include the location information in the instrument settings, if that settings type is selected, by default False

-
-
verbose : bool, optional

Whether to print output (filepath and settings) to terminal, by default True

-
-
-
-
-
-

See also

-
-
export_settings
-
-
-
- -
-
-get_report(**kwargs)[source]
-

Method to get report from processed data, in print, graphical, or tabular format.

-
-
Returns:
-
-
Variable

May return nothing, a pandas.DataFrame, or a pyplot Figure, depending on input.

-
-
-
-
-
-

See also

-
-
get_report
-
-
-
- -
-
-items()[source]
-

Method to return both the site names and the HVSRData object as a set of dict_items tuples. Functions similar to dict.items().

-
-
Returns:
-
-
_type_

_description_

-
-
-
-
-
- -
-
-keys()[source]
-

Method to return the “keys” of the HVSRBatch object. For HVSRBatch objects, these are the site names. Functions similar to dict.keys().

-
-
Returns:
-
-
dict_keys

A dict_keys object listing the site names of each of the HVSRData objects contained in the HVSRBatch object

-
-
-
-
-
- -
-
-plot(**kwargs)[source]
-

Method to plot data, based on the sprit.plot_hvsr() function.

-

All the same kwargs and default values apply as plot_hvsr(). For return_fig, the figure is returned to the 'Plot_Report' attribute of each HVSRData object.

-
-
Returns:
-
-
_type_

_description_

-
-
-
-
-
-

See also

-
-
plot_hvsr
-
-
-
- -
-
-report(**kwargs)[source]
-

Wrapper of get_report()

-
-

See also

-
-
get_report
-
-
-
- -
- -
-
-class sprit.sprit_hvsr.HVSRData(*args, **kwargs)[source]
-

Bases: object

-

HVSRData is the basic data class of the sprit package. It contains all the processed data, input parameters, and reports.

-

These attributes and objects can be accessed using square brackets or the dot accessor. For example, to access the site name, HVSRData[‘site’] and HVSRData.site will both return the site name.

-

Some of the methods that work on the HVSRData object (e.g., .plot() and .get_report()) are essentially wrappers for some of the main sprit package functions (sprit.plot_hvsr() and sprit.get_report(), respectively)
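For instance (a minimal sketch; hvsr_data stands for any HVSRData object):

```python
# Bracket and dot access are interchangeable for HVSRData attributes
site_name = hvsr_data["site"]
site_name = hvsr_data.site

# Wrapper methods call the corresponding sprit functions
hvsr_data.plot()        # wraps sprit.plot_hvsr()
hvsr_data.get_report()  # wraps sprit.get_report()
```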

-
-
Attributes:
-
-
batch

Whether this HVSRData object is part of an HVSRBatch object.

-
-
datastream

A copy of the original obspy datastream read in.

-
-
params

Dictionary containing the parameters used to process the data

-
-
ppsds

Dictionary copy of the class object obspy.signal.spectral_estimation.PPSD().

-
-
ppsds_obspy

The original ppsd information from the obspy.signal.spectral_estimation.PPSD(), so as to keep the original in case the copy is manipulated/changed.

-
-
-
-
-

Methods

- - - - - - - - - - - - - - - - - - - - - - - - - - - -

copy([type])

Make a copy of the HVSRData object.

export([hvsr_export_path, ext])

Method to export HVSRData objects to .hvsr pickle files.

export_settings([export_settings_path, ...])

Method to export settings from HVSRData object.

get_report(**kwargs)

Method to get report from processed data, in print, graphical, or tabular format.

items()

Method to return the "items" of the HVSRData object.

keys()

Method to return the "keys" of the HVSRData object.

plot(**kwargs)

Method to plot data, wrapper of sprit.plot_hvsr()

report(**kwargs)

Wrapper of get_report()

-
-
-property batch
-

Whether this HVSRData object is part of an HVSRBatch object. This is used throughout the code to help direct the object into the proper processing pipeline.

-
-
Returns:
-
-
bool

True if HVSRData object is part of HVSRBatch object, otherwise, False

-
-
-
-
-
- -
-
-copy(type='shallow')[source]
-

Make a copy of the HVSRData object. Uses python copy module.

-
-
Parameters:
-
-
type : str {'shallow', 'deep'}

Based on input, creates either a shallow or deep copy of the HVSRData object. Shallow is equivalent to copy.copy(). Input of type='deep' is equivalent to copy.deepcopy() (still experimental). Defaults to shallow.

-
-
-
-
-
- -
-
-property datastream
-

A copy of the original obspy datastream read in. This helps to retain the original data even after processing is carried out.

-
-
Returns:
-
-
obspy.core.Stream.stream

Obspy stream

-
-
-
-
-
- -
-
-export(hvsr_export_path=None, ext='hvsr')[source]
-

Method to export HVSRData objects to .hvsr pickle files.

-
-
Parameters:
-
-
hvsr_export_path : filepath, default=True

Filepath for saving the file. This can be a directory (in which case a filename will be assigned based on the HVSRData attributes). If True (the default), it will first try to save each file to the same directory as input_data, then if that does not work, to the current working directory, then to the user's home directory.

-
-
ext : str, optional

The extension to use for the output, by default ‘hvsr’. This is still a pickle file that can be read with pickle.load(), but will have .hvsr extension.

-
-
-
-
-
- -
-
-export_settings(export_settings_path='default', export_settings_type='all', include_location=False, verbose=True)[source]
-

Method to export settings from HVSRData object. Simply calls sprit.export_settings() from the HVSRData object. See sprit.export_settings() for more details.

-
-
Parameters:
-
-
export_settings_path : str, optional

Filepath to output file. If left as ‘default’, will save as the default value in the resources directory. If that is not possible, will save to home directory, by default ‘default’

-
-
export_settings_type : str, {'all', 'instrument', 'processing'}, optional

The type of settings to save, by default 'all'

-
-
include_location : bool, optional

Whether to include the location information in the instrument settings, if that settings type is selected, by default False

-
-
verbose : bool, optional

Whether to print output (filepath and settings) to terminal, by default True

-
-
-
-
-
- -
-
-get_report(**kwargs)[source]
-

Method to get report from processed data, in print, graphical, or tabular format.

-
-
Returns:
-
-
Variable

May return nothing, a pandas.DataFrame, or a pyplot Figure, depending on input.

-
-
-
-
-
-

See also

-
-
get_report
-
-
-
- -
-
-items()[source]
-

Method to return the “items” of the HVSRData object. For HVSRData objects, this is a dict_items object with the keys and values in tuples. Functions similar to dict.items().

-
-
Returns:
-
-
dict_items

A dict_items object of the HVSRData object's attributes, parameters, etc.

-
-
-
-
-
- -
-
-keys()[source]
-

Method to return the “keys” of the HVSRData object. For HVSRData objects, these are the attributes and parameters of the object. Functions similar to dict.keys().

-
-
Returns:
-
-
dict_keys

A dict_keys object of the HVSRData object's attributes, parameters, etc.

-
-
-
-
-
- -
-
-property params
-

Dictionary containing the parameters used to process the data

-
-
Returns:
-
-
dict

Dictionary containing the process parameters

-
-
-
-
-
- -
-
-plot(**kwargs)[source]
-

Method to plot data, wrapper of sprit.plot_hvsr()

-
-
Returns:
-
-
matplotlib.Figure, matplotlib.Axis (if return_fig=True)
-
-
-
-
-

See also

-
-
plot_hvsr
-
plot_azimuth
-
-
-
- -
-
-property ppsds
-

Dictionary copy of the class object obspy.signal.spectral_estimation.PPSD(). The dictionary copy allows manipulation of the data in PPSD, whereas that data cannot be easily manipulated in the original Obspy object.

-
-
Returns:
-
-
dict

Dictionary copy of the PPSD information from generate_ppsds()

-
-
-
-
-
- -
-
-property ppsds_obspy
-

The original ppsd information from the obspy.signal.spectral_estimation.PPSD(), so as to keep the original in case the copy is manipulated/changed.

-
- -
-
-report(**kwargs)[source]
-

Wrapper of get_report()

-
-

See also

-
-
get_report
-
-
-
- -
- -
-
-sprit.sprit_hvsr.batch_data_read(batch_data, batch_type='table', param_col=None, batch_params=None, verbose=False, **readcsv_getMeta_fetch_kwargs)[source]
-

Function to read in data as a batch of multiple data files. This is best used through sprit.fetch_data(*args, source='batch', **other_kwargs).

-
-
Parameters:
-
-
batch_data : filepath or list

Input data information for how to read in data as a batch. Can be a filepath or a list of filepaths/stream objects. If a filepath, it should point to a .csv file (or similar file that can be read by pandas.read_csv()) with batch data information.

-
-
batch_type : str, optional

Type of batch read; only 'table' and 'filelist' are accepted. If 'table', will read data from a file read in using pandas.read_csv(), by default 'table'

-
-
param_col : None or str, optional

Name of parameter column from batch information file. Only used if batch_type='table' and a single parameter column is used, rather than one column per parameter (for a single parameter column, parameters are formatted with = between keys/values and , between item pairs), by default None

-
-
batch_params : list, dict, or None, default = None

Parameters to be used if batch_type='filelist'. If it is a list, it needs to be the same length as batch_data. If it is a dict, it will be applied to all files in batch_data and will be combined with extra keyword arguments caught by **readcsv_getMeta_fetch_kwargs.

-
-
verbose : bool, optional

Whether to print information to terminal during batch read, by default False

-
-
**readcsv_getMeta_fetch_kwargs

Keyword arguments that will be read into pandas.read_csv(), sprit.input_params, sprit.get_metadata(), and/or sprit.fetch_data()

-
-
-
-
Returns:
-
-
hvsrBatch

HVSRBatch object with each item representing a different HVSRData object

-
-
-
-
Raises:
-
-
IndexError

_description_
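To make the 'table' option concrete, here is a minimal sketch (file name and column names are hypothetical; per the docstring above, columns can be arranged by parameter):

```python
import pandas as pd
import sprit

# One row per record; column names mirror the sprit parameters they set
batch_df = pd.DataFrame({
    "input_data": ["site1.mseed", "site2.mseed"],  # hypothetical files
    "site": ["Site1", "Site2"],
})
batch_df.to_csv("batch_info.csv", index=False)

# Read every row into its own HVSRData object inside an HVSRBatch
params = sprit.input_params("batch_info.csv")
hvsr_batch = sprit.fetch_data(params, source="batch")
```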

-
-
-
-
-
- -
-
-sprit.sprit_hvsr.calculate_azimuth(hvsr_data, azimuth_angle=30, azimuth_type='multiple', azimuth_unit='degrees', show_az_plot=False, verbose=False, **plot_azimuth_kwargs)[source]
-

Function to calculate azimuthal horizontal component at specified angle(s). Adds each new horizontal component as a radial component to obspy.Stream object at hvsr_data[‘stream’]

-
-
Parameters:
-
-
hvsr_data : HVSRData

Input HVSR data

-
-
azimuth_angle : int, default=30

If azimuth_type='multiple', this is the angular step (in unit azimuth_unit) of each of the azimuthal measurements. If azimuth_type='single', this is the angle (in unit azimuth_unit) of the single calculated azimuthal measurement. By default 30.

-
-
azimuth_type : str, default='multiple'

What type of azimuthal measurement to make, by default 'multiple'. If 'multiple' (or {'multi', 'mult', 'm'}), will take a measurement at each angular step of azimuth_angle of unit azimuth_unit. If 'single' (or {'sing', 's'}), will take a single azimuthal measurement at the angle specified in azimuth_angle.

-
-
azimuth_unit : str, default='degrees'

Angular unit used to specify the azimuth_angle parameter. By default 'degrees'. If 'degrees' (or {'deg', 'd'}), will use degrees. If 'radians' (or {'rad', 'r'}), will use radians.

-
-
show_az_plot : bool, default=False

Whether to show azimuthal plot, by default False.

-
-
verbose : bool, default=False

Whether to print terminal output, by default False

-
-
-
-
Returns:
-
-
HVSRData

Updated HVSRData object specified in hvsr_data, with the hvsr_data['stream'] attribute containing additional components (EHR-*), with * being the zero-padded (3-digit) azimuth angle in degrees.
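A short sketch of both modes (assumes sprit is imported and hvsr_data has been through sprit.fetch_data()):

```python
import sprit

# One radial component every 30 degrees (0, 30, 60, ...)
hvsr_data = sprit.calculate_azimuth(hvsr_data, azimuth_angle=30, azimuth_type="multiple")

# Or a single radial component at 45 degrees
hvsr_data = sprit.calculate_azimuth(hvsr_data, azimuth_angle=45, azimuth_type="single")

# New EHR-* traces (e.g., EHR-030) now live in hvsr_data["stream"]
print(hvsr_data["stream"])
```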

-
-
-
-
-
- -
-
-sprit.sprit_hvsr.check_instance(init)[source]
-
- -
-
-sprit.sprit_hvsr.check_peaks(hvsr_data, hvsr_band=[0.4, 40], peak_selection='max', peak_freq_range=[0.4, 40], azimuth='HV', verbose=False)[source]
-

Function to run tests on HVSR peaks to find the best one and see if it passes quality checks

-
-
Parameters:
-
-
hvsr_data : dict

Dictionary containing all the calculated information about the HVSR data (i.e., hvsr_out returned from process_hvsr)

-
-
hvsr_band : tuple or list, default=[0.4, 40]

2-item tuple or list with lower and upper limit of frequencies to analyze

-
-
peak_selection : str or numeric, default='max'

How to select the "best" peak used in the analysis. For peak_selection="max" (default value), the highest peak within peak_freq_range is used. For peak_selection='scored', an algorithm is used to select the peak based in part on which peak passes the most SESAME criteria. If a numeric value is used (e.g., int or float), this should be a frequency value to manually select as the peak of interest.

-
-
peak_freq_range : tuple or list, default=[0.4, 40]

The frequency range within which to check for peaks. If there is an HVSR curve with multiple peaks, this allows the full range of data to be processed while limiting peak picks to likely range.

-
-
verbose : bool, default=False

Whether to print results and inputs to terminal.

-
-
-
-
Returns:
-
-
hvsr_data : HVSRData or HVSRBatch object

Object containing previous input data, plus information about peak tests
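For example, a sketch (hvsr_results assumed to come from sprit.process_hvsr()):

```python
import sprit

# Score peaks within a tighter range than the processed band
hvsr_results = sprit.check_peaks(
    hvsr_results,
    hvsr_band=[0.4, 40],
    peak_selection="scored",    # use SESAME-criteria scoring instead of the max
    peak_freq_range=[0.5, 10],  # only consider peaks in this range
)
```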

-
-
-
-
-
- -
-
-sprit.sprit_hvsr.export_data(hvsr_data, hvsr_export_path=None, ext='hvsr', verbose=False)[source]
-

Export data into pickle format that can be read back in using import_data() so data does not need to be processed each time. Default extension is .hvsr, but it is still a pickled file that can be read in using pickle.load().

-
-
Parameters:
-
-
hvsr_data : HVSRData or HVSRBatch

Data to be exported

-
-
hvsr_export_path : str or filepath object, default = None

String or filepath object to be read by pathlib.Path() and/or a with open(hvsr_export_path, 'wb') statement. If None, defaults to the input_data directory, by default None

-
-
ext : str, default = 'hvsr'

Filepath extension to use for data file, by default ‘hvsr’
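A minimal export/import roundtrip might look like this (paths and filename are hypothetical; import_data() is documented below):

```python
import sprit

# Save processed results to a pickled .hvsr file...
sprit.export_data(hvsr_results, hvsr_export_path="/path/to/output", ext="hvsr")

# ...and read them back later without reprocessing
hvsr_results = sprit.import_data("/path/to/output/site.hvsr")  # hypothetical filename
```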

-
-
-
-
-
- -
-
-sprit.sprit_hvsr.export_report(hvsr_results, report_export_path=None, report_export_format=['pdf'], azimuth='HV', csv_handling='rename', show_report=True, verbose=False)[source]
-

Function to export reports to disk. Exportable formats include:
- 'table': saves a pandas DataFrame as a csv
- 'plot': saves the matplotlib or plotly plot figure (depending on what is designated via plot_engine) as an image (png by default)
- 'print': saves the print report as a .txt file
- 'html': saves the html report as a .html file
- 'pdf': saves the pdf report as a .pdf file

-
-
Parameters:
-
-
hvsr_results : HVSRData object

HVSRData object containing the HVSR data

-
-
report_export_path : path-like object, optional

The path to where the report should be exported. If this is None (default), this is written to the home directory. If this is True, uses the same directory as the input data, but generates a filename. If this is a directory, generates a filename. If a filename is specified and the extension does not match the report type, the extension is adjusted. Otherwise, this is the output file, by default None

-
-
csv_handling : {'rename', 'append', 'overwrite', 'keep'}, optional

If table is the report type, this can prevent overwriting data, by default 'rename'.
- "rename" (or "keep"): renames the new file to prevent overwrite, appends a digit to the end of the filename
- "append": appends the new data to the existing file
- "overwrite": overwrites the existing file

-
-
report_export_format : str or list, optional

The format (or a list of formats) to export the report, by default [‘pdf’].

-
-
show_report : bool, optional

Whether to show the designated reports that were chosen for export, by default True

-
-
verbose : bool, optional

Whether to print progress and other information to terminal, by default False

-
-
-
-
Returns:
-
-
HVSRData

An HVSRData object that is the same as hvsr_results, but with any additionally generated reports.

-
-
-
-
-
- -
-
-sprit.sprit_hvsr.export_settings(hvsr_data, export_settings_path='default', export_settings_type='all', include_location=False, verbose=True)[source]
-

Save settings to json file

-
-
Parameters:
-
-
export_settings_path : str, default="default"

Where to save the json file(s) containing the settings, by default 'default'. If "default", will save to sprit package resources. Otherwise, set a filepath location you would like for it to be saved to. If 'all' is selected, a directory should be supplied. Otherwise, it will save in the directory of the provided file, if it exists. Otherwise, defaults to the home directory.

-
-
export_settings_type : str, {'all', 'instrument', 'processing'}

What kind of settings to save. If 'all', saves all possible types in their respective json files. If 'instrument', saves the instrument settings to their respective file. If 'processing', saves the processing settings to their respective file. By default 'all'

-
-
include_location : bool, default=False

Whether to include the location parameters in the exported settings document. This includes xcoord, ycoord, elevation, elev_unit, and input_crs.

-
-
verbose : bool, default=True

Whether to print outputs and information to the terminal

-
-
-
-
-
- -
-
-sprit.sprit_hvsr.fetch_data(params, source='file', data_export_path=None, data_export_format='mseed', detrend='spline', detrend_order=2, update_metadata=True, plot_input_stream=False, plot_engine='matplotlib', show_plot=True, verbose=False, **kwargs)[source]
-

Fetch ambient seismic data from a source to read into obspy stream

-
-
Parameters:
-
-
params : dict

Dictionary containing all the necessary params to get data. Parameters defined using the input_params() function.

-
-
-
-
source : str, {'raw', 'dir', 'file', 'batch'}

String indicating where/how the data file was created. For example, if raw data, will need to find the correct channels.
- 'raw' finds Raspberry Shake data, from raw output copied using scp directly from the Raspberry Shake, either in a folder or subfolders;
- 'dir' is used if the day's 3 component files (currently Raspberry Shake supported only) are all 3 contained in a directory by themselves;
- 'file' is used if the params['input_data'] specified in input_params() is the direct filepath to a single file to be read directly into an obspy stream;
- 'batch' is used to read a list or specified set of seismic files. Most commonly, a csv file can be read in with all the parameters. Each row in the csv is a separate file. Columns can be arranged by parameter.

-
-
-
-
-
data_export_path : None or str or pathlib obj, default=None

If None (or False), data is not trimmed in this function. Otherwise, this is the directory to save trimmed and exported data.

-
-
data_export_format : str, default='mseed'

If data_export_path is not None, this is the format in which to save the data

-
-
detrend : str or bool, default='spline'

If False, data is not detrended. Otherwise, this should be a string accepted by the type parameter of the obspy.core.trace.Trace.detrend method: https://docs.obspy.org/packages/autogen/obspy.core.trace.Trace.detrend.html

-
-
detrend_order : int, default=2

If detrend parameter is ‘spline’ or ‘polynomial’, this is passed directly to the order parameter of obspy.core.trace.Trace.detrend method.

-
-
update_metadata : bool, default=True

Whether to update the metadata file, used primarily with Raspberry Shake data which uses a generic inventory file.

-
-
plot_input_stream : bool, default=False

Whether to plot the raw input stream. This plot includes a spectrogram (Z component) and the raw (with decimation for speed) plots of each component signal.

-
-
plot_engine : str, default='matplotlib'

Which plotting library/engine to use for plotting the Input stream. Options are ‘matplotlib’, ‘plotly’, or ‘obspy’ (not case sensitive).

-
-
verbose : bool, default=False

Whether to print outputs and inputs to the terminal

-
-
**kwargs

Keyword arguments, primarily for 'batch' and 'dir' sources

-
-
-
-
Returns:
-
-
params : HVSRData or HVSRBatch object

Same as params parameter, but with an additional “stream” attribute with an obspy data stream with 3 traces: Z (vertical), N (North-south), and E (East-west)
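For instance, a minimal sketch (the path is a placeholder):

```python
import sprit

# Read a single data file directly into an obspy stream
params = sprit.input_params(input_data="/path/to/record.mseed")  # hypothetical path
hvsr_data = sprit.fetch_data(params, source="file", detrend="spline", detrend_order=2)

# The fetched stream (Z, N, E traces) is attached to the returned object
print(hvsr_data["stream"])
```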

-
-
-
-
-
- -
-
-sprit.sprit_hvsr.generate_ppsds(hvsr_data, azimuthal_ppsds=False, verbose=False, **ppsd_kwargs)[source]
-

Generates PPSDs for each channel

-

Channels need to be in Z, N, E order. Info on PPSD creation here: https://docs.obspy.org/packages/autogen/obspy.signal.spectral_estimation.PPSD.html

-
-
Parameters:
-
-
hvsr_data : dict, HVSRData object, or HVSRBatch object

Data object containing all the parameters and other data of interest (stream and paz, for example)

-
-
azimuthal_ppsds : bool, default=False

Whether to generate PPSDs for azimuthal data

-
-
verbose : bool, default=True

Whether to print inputs and results to terminal

-
-
**ppsd_kwargs : dict

Dictionary with keyword arguments that are passed directly to obspy.signal.PPSD. If the following keywords are not specified, their defaults are amended in this function from the obspy defaults for its PPSD function. Specifically:

- ppsd_length defaults to 30 (seconds) here instead of 3600
- skip_on_gaps defaults to True instead of False
- period_step_octaves defaults to 0.03125 instead of 0.125
-
-
-
-
Returns:
-
-
ppsds : HVSRData object

Dictionary containing entries with ppsds for each channel
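Overriding one of those amended defaults is then just a matter of passing it as a keyword argument (a sketch):

```python
import sprit

# PPSD options pass straight through via **ppsd_kwargs
hvsr_data = sprit.generate_ppsds(
    hvsr_data,
    ppsd_length=60,              # override the amended 30 s default
    period_step_octaves=0.0625,  # coarser than the amended 0.03125 default
)
```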

-
-
-
-
-
- -
-
-sprit.sprit_hvsr.get_metadata(params, write_path='', update_metadata=True, source=None, **read_inventory_kwargs)[source]
-

Get metadata and calculate or get paz parameter needed for PPSD

-
-
Parameters:
-
-
params : dict

Dictionary containing all the input and other parameters needed for processing. Output from the input_params() function.

-
-
-
-
write_path : str

String with output filepath of where to write updated inventory or metadata file. If not specified, does not write file.

-
-
-
-
update_metadata : bool

Whether to update the metadata file itself, or just read as-is. If using the provided Raspberry Shake metadata file, select True.

-
-
source : str, default=None

This passes the source variable value to _read_RS_metadata. It is expected that this is passed directly from the source parameter of sprit.fetch_data()

-
-
-
-
Returns:
-
-
params : dict

Modified input dictionary with additional key:value pair containing paz dictionary (key = “paz”)

-
-
-
-
-
- -
-
-sprit.sprit_hvsr.get_report(hvsr_results, report_formats=['print', 'table', 'plot', 'html', 'pdf'], azimuth='HV', plot_type='HVSR p ann C+ p ann Spec p ann', plot_engine='matplotlib', show_print_report=True, show_table_report=False, show_plot_report=True, show_html_report=False, show_pdf_report=True, suppress_report_outputs=False, show_report_outputs=False, csv_handling='append', report_export_format=None, report_export_path=None, verbose=False, **kwargs)[source]
-

Generate and/or print and/or export a report of the HVSR analysis in a variety of formats.

-

Formats include:
- 'print': A (monospace) text summary of the HVSR results
- 'table': A pandas.DataFrame summary of the HVSR results. This is useful for copy/pasting directly into a larger worksheet.
- 'plot': A plot summary of the HVSR results, generated using the plot_hvsr() function.
- 'html': An HTML document/text of the HVSR results. This includes the table, print, and plot reports in one document.
- 'pdf': A PDF document showing the summary of the HVSR results. The PDF report is simply the HTML report saved to an A4-sized PDF document.
-
-
Parameters:
-
-
hvsr_results : dict

Dictionary containing all the information about the processed hvsr data

-
-
report_formats : {'table', 'print', 'plot', 'html', 'pdf'}

Format in which to print or export the report. The following report_formats return the following items in the following attributes:

- 'print': hvsr_results['Print_Report'] as a str
- 'plot': hvsr_results['HV_Plot'] - matplotlib.Figure object
- 'table': hvsr_results['Table_Report'] - pandas.DataFrame object
  - list/tuple - a list or tuple of the above objects, in the same order they are in the report_formats list
- 'html': hvsr_results['HTML_Report'] - a string containing the text for an HTML document
- 'pdf': currently does not save to the HVSRData object itself, can only be saved to the disk directly
-
-
-
plot_type : str, default = 'HVSR p ann C+ p ann Spec p ann'

What type of plot to plot, if 'plot' is part of the report_formats input

-
-
azimuth : str, default = 'HV'

Which azimuth to plot, by default “HV” which is the main “azimuth” combining the E and N components

-
-
csv_handling : str, {'append', 'overwrite', 'keep/rename'}

How to handle table report outputs if the designated csv output file already exists. By default, appends the new information to the end of the existing file.

-
-
suppress_report_outputs : bool, default=False

If True, only reads output to the appropriate attribute of the data class (i.e., print does not print, only reads text into a variable). If False, performs as normal.

-
-
report_export_format : list or str, default=['pdf']

A string or list of strings indicating which report formats should be exported to disk.

-
-
report_export_path : None, bool, or filepath, default = None

If None or False, does not export; if True, will export to the same directory as the input_data parameter in the input_params() function. Otherwise, it should be a string or path object indicating where to export results. May be a file or directory. If a directory is specified, the filename will be "<site_name>_<acq_date>_<UTC start time>-<UTC end time>". The extension/suffix defaults to png for report_formats="plot", csv for 'table', txt for 'print', html for 'html', and pdf for 'pdf'.

-
-
verbose : bool, default=True

Whether to print the results to terminal. This is the same output as report_formats=’print’, and will not repeat if that is already selected

-
-
-
-
Returns:
-
-
sprit.HVSRData
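A typical call might look like this (a sketch; the export directory is a placeholder):

```python
import sprit

hvsr_results = sprit.get_report(
    hvsr_results,
    report_formats=["print", "table", "plot"],
    plot_type="HVSR p ann C+ p ann Spec p ann",
    report_export_format="pdf",
    report_export_path="/path/to/reports",  # directory: a filename is generated
)
```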
-
-
-
-
- -
-
-sprit.sprit_hvsr.gui(kind='browser')[source]
-

Function to open a graphical user interface (gui)

-
-
Parameters:
-
-
kind : str, optional

What type of gui to open. "default" opens the regular windowed interface, "widget" opens the jupyter widget, and "lite" opens the lite version (pending update), by default 'default'

-
-
-
-
-
- -
-
-sprit.sprit_hvsr.gui_test()[source]
-
- -
-
-sprit.sprit_hvsr.import_data(import_filepath, data_format='pickle')[source]
-

Function to import .hvsr (or other extension) data exported using export_data() function

-
-
Parameters:
-
-
import_filepath : str or path object

Filepath of file created using export_data() function. This is usually a pickle file with a .hvsr extension

-
-
data_format : str, default='pickle'

Type of format data is in. Currently, only ‘pickle’ supported. Eventually, json or other type may be supported, by default ‘pickle’.

-
-
-
-
Returns:
-
-
HVSRData or HVSRBatch object
-
-
-
-
- -
-
-sprit.sprit_hvsr.import_settings(settings_import_path, settings_import_type='instrument', verbose=False)[source]
-
- -
-
-sprit.sprit_hvsr.input_params(input_data, site='HVSR Site', id_prefix=None, network='AM', station='RAC84', loc='00', channels=['EHZ', 'EHN', 'EHE'], acq_date='2024-10-30', starttime=UTCDateTime(2024, 10, 30, 0, 0), endtime=UTCDateTime(2024, 10, 30, 23, 59, 59, 999999), tzone='UTC', xcoord=-88.2290526, ycoord=40.1012122, elevation=755, input_crs=None, output_crs=None, elev_unit='meters', depth=0, instrument='Raspberry Shake', metapath=None, hvsr_band=[0.4, 40], peak_freq_range=[0.4, 40], processing_parameters={}, verbose=False)[source]
-

Function for designating input parameters for reading in and processing data

-
-
Parameters:
-
-
input_data : str or pathlib.Path object

Filepath of data. This can be a directory or file, but will need to match with what is chosen later as the source parameter in fetch_data()

-
-
site : str, default="HVSR Site"

Site name as designated by user for ease of reference. Used for plotting titles, filenames, etc.

-
-
id_prefix : str, default=None

A prefix that may be used to create unique identifiers for each site. The identifier created is saved as the ['HVSR_ID'] attribute of the HVSRData object, and is equivalent to the following formatted string: f"{id_prefix}-{acq_date.strftime('%Y%m%d')}-{starttime.strftime('%H%M')}-{station}".

-
-
network : str, default='AM'

The network designation of the seismometer. This is necessary for data from Raspberry Shakes. ‘AM’ is for Amateur network, which fits Raspberry Shakes.

-
-
station : str, default='RAC84'

The station name of the seismometer. This is necessary for data from Raspberry Shakes.

-
-
loc : str, default='00'

Location information of the seismometer.

-
-
channels : list, default=['EHZ', 'EHN', 'EHE']

The three channels used in this analysis, as a list of strings. Preferred that Z component is first, but not necessary

-
-
acq_date : str, int, date object, or datetime object

If a string, preferred format is 'YYYY-MM-DD'. If an int, this will be interpreted as the time_int of year of the current year (e.g., 33 would be Feb 2 of the current year). If a date or datetime object, this will be the date. Make sure to account for time change when converting to UTC (if UTC is the following time_int, use the UTC time_int).

-
-
starttime : str, time object, or datetime object, default='00:00:00.00'

Start time of data stream. This is necessary for Raspberry Shake data in ‘raw’ form, or for trimming data. Format can be either ‘HH:MM:SS.micros’ or ‘HH:MM’ at minimum.

-
-
endtime : str, time object, or datetime object, default='23:59:99.99'

End time of data stream. This is necessary for Raspberry Shake data in ‘raw’ form, or for trimming data. Same format as starttime.

-
-
tzone : str or int, default = 'UTC'

Timezone of input data. If a string, 'UTC' will use the time as input directly. Any other string value needs to be a TZ identifier in the IANA database; a wikipedia page of these is available here: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. If an int, should be the int value of the UTC offset (e.g., for American Eastern Standard Time: -5). This is necessary for Raspberry Shake data in 'raw' format.

-
-
xcoord : float, default=-88.2290526

Longitude (or easting, or, generally, X coordinate) of data point, in Coordinate Reference System (CRS) designated by input_crs. Currently only used in table output, but will likely be used in future for mapping/profile purposes.

-
-
ycoord : float, default=40.1012122

Latitude (or northing, or, generally, Y coordinate) of data point, in Coordinate Reference System (CRS) designated by input_crs. Currently only used in table output, but will likely be used in future for mapping/profile purposes.

-
-
input_crs : str or other format read by pyproj, default='EPSG:4326'

Coordinate reference system of input data, as used by pyproj.CRS.from_user_input()

-
-
output_crs : str or other format read by pyproj, default='EPSG:4326'

Coordinate reference system to which input data will be transformed, as used by pyproj.CRS.from_user_input()

-
-
elevation : float, default=755

Surface elevation of data point. Not currently used (except in table output), but will likely be used in the future.

-
-
depth : float, default=0

Depth of seismometer. Not currently used, but will likely be used in the future.

-
-
instrument : str {'Raspberry Shake', 'Tromino'}

Instrument from which the data was acquired.

-
-
metapath : str or pathlib.Path object, default=None

Filepath of metadata, in format supported by obspy.read_inventory. If default value of None, will read from resources folder of repository (only supported for Raspberry Shake).

-
-
hvsr_band : list, default=[0.4, 40]

Two-element list containing low and high "corner" frequencies (in Hz) for processing. This can be specified again later.

-
-
peak_freq_range : list or tuple, default=[0.4, 40]

Two-element list or tuple containing low and high frequencies (in Hz) that are used to check for HVSR peaks. This can be a tighter range than hvsr_band, but if larger, it will still only use the hvsr_band range.

-
-
processing_parameters : dict or filepath, default={}

If a filepath, should point to a .proc json file with processing parameters (i.e., an output from sprit.export_settings()). Note that this only applies to parameters for the functions 'fetch_data', 'remove_noise', 'generate_ppsds', 'process_hvsr', 'check_peaks', and 'get_report'. If a dictionary, it should contain nested dictionaries with function names as the keys, and the parameter names/values as key/value pairs for each key. If a function name is not present, or if a parameter name is not present, default values will be used. For example:

{'fetch_data': {'source': 'batch', 'data_export_path': '/path/to/trimmed/data', 'data_export_format': 'mseed', 'detrend': 'spline', 'plot_input_stream': True, 'verbose': False, 'kwargs': {'kwargskey': 'kwargsvalue'}}}

-
-
-
verbose : bool, default=False

Whether to print output and results to terminal

-
-
-
-
Returns:
-
-
params : sprit.HVSRData

sprit.HVSRData class containing input parameters, including data file path and metadata path. This will be used as an input to other functions. If batch processing, params will be converted to batch type in fetch_data() step.
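Putting a few of these parameters together (a sketch; the path and times are placeholders):

```python
import sprit

params = sprit.input_params(
    input_data="/path/to/record.mseed",  # hypothetical file
    site="Example Site",
    network="AM", station="RAC84", loc="00",
    channels=["EHZ", "EHN", "EHE"],
    acq_date="2024-10-30", starttime="08:00", endtime="12:00",
    tzone="America/Chicago",             # IANA identifier, converted to UTC
    xcoord=-88.2290526, ycoord=40.1012122, input_crs="EPSG:4326",
    hvsr_band=[0.4, 40],
)
```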

-
-
-
-
-
- -
-
-sprit.sprit_hvsr.plot_azimuth(hvsr_data, fig=None, ax=None, show_azimuth_peaks=False, interpolate_azimuths=True, show_azimuth_grid=False, show_plot=True, **plot_azimuth_kwargs)[source]
-

Function to plot azimuths when azimuths are calculated

-
-
Parameters:
-
-
hvsr_data : HVSRData or HVSRBatch

HVSRData that has gone through at least the sprit.fetch_data() step, and before sprit.generate_ppsds()

-
-
show_azimuth_peaks : bool, optional

Whether to display the peak value at each azimuth calculated on the chart, by default False

-
-
interpolate_azimuths : bool, optional

Whether to interpolate the azimuth data to get a smoother plot. This is just for visualization and does not change the underlying data. It takes a lot of time to process the data, but interpolation for visualization can happen fairly fast. By default True.

-
-
show_azimuth_grid : bool, optional

Whether to display the grid on the chart, by default False

-
-
-
-
Returns:
-
-
matplotlib.Figure, matplotlib.Axis

Figure and axis of resulting azimuth plot

-
-
-
-
-
- -
-
-sprit.sprit_hvsr.plot_hvsr(hvsr_data, plot_type='HVSR ann p C+ ann p SPEC ann p', azimuth='HV', use_subplots=True, fig=None, ax=None, return_fig=False, plot_engine='matplotlib', save_dir=None, save_suffix='', show_legend=False, show_plot=True, close_figs=False, clear_fig=True, **kwargs)[source]
-

Function to plot HVSR data

-
-
Parameters:
-
-
hvsr_data : dict

Dictionary containing output from process_hvsr function

-
-
plot_type : str or list, default = 'HVSR ann p C+ ann p SPEC ann p'

The plot_type of plot(s) to plot. If a list, will plot all plots listed.

- 'HVSR' - Standard HVSR plot, including standard deviation. Options are included below:
  - 'p' shows a vertical dotted line at the frequency of the "best" peak
  - 'ann' annotates the frequency value of the "best" peak
  - 'all' shows all the peaks identified in check_peaks() (by default, only the max is identified)
  - 't' shows the H/V curve for all time windows
  - 'tp' shows all the peaks from the H/V curves of all the time windows
  - 'fr' shows the window within which SpRIT will search for peak frequencies, as set by peak_freq_range
  - 'test' shows a visualization of the results of the peak validity test(s). Examples:
    - 'tests' visualizes the results of all the peak tests (not the curve tests)
    - 'test12' shows the results of tests 1 and 2. Append any number 1-6 after 'test' to show a specific test result visualized
- 'COMP' - plot of the PPSD curves for each individual component ("C" also works)
  - '+' (as a suffix in 'C+' or 'COMP+') plots C on a plot separate from HVSR (C+ is default; without + it will plot on the same plot as HVSR)
  - 'p' shows a vertical dotted line at the frequency of the "best" peak
  - 'ann' annotates the frequency value of the "best" peak
  - 'all' shows all the peaks identified in check_peaks() (by default, only the max is identified)
  - 't' shows the H/V curve for all time windows
- 'SPEC' - spectrogram style plot of the H/V curve over time
  - 'p' shows a horizontal dotted line at the frequency of the "best" peak
  - 'ann' annotates the frequency value of the "best" peak
  - 'all' shows all the peaks identified in check_peaks()
  - 'tp' shows all the peaks of the H/V curve at all time windows
- 'AZ' - circular plot of calculated azimuthal HV curves, similar in style to the SPEC plot.
  - 'p' shows a point at each calculated (not interpolated) azimuth peak
  - 'g' shows grid lines at various angles
  - 'i' interpolates so that there is an interpolated azimuth at each degree interval (1 degree step). This is the default, so usually 'i' is not needed.
  - '-i' prohibits interpolation (only shows the calculated azimuths, as determined by azimuth_angle (default = 30))
-
-
azimuth : str, default = 'HV'

Which 'azimuth' to plot; the default 'HV' is the standard combination of the N and E components.

-
-
use_subplots : bool, default = True

Whether to output the plots as subplots (True) or as separate plots (False)

-
-
fig : matplotlib.Figure, default = None

If not None, matplotlib figure on which plot is plotted

-
-
ax : matplotlib.Axis, default = None

If not None, matplotlib axis on which plot is plotted

-
-
return_fig : bool

Whether to return figure and axis objects

-
-
plot_engine : str, default='matplotlib'

Which engine to use for plotting. Both “matplotlib” and “plotly” are acceptable. For shorthand, ‘mpl’, ‘m’ also work for matplotlib; ‘plty’ or ‘p’ also work for plotly. Not case sensitive.

-
-
save_dir : str or None

Directory in which to save figures

-
-
save_suffix : str

Suffix to add to end of figure filename(s), if save_dir is used

-
-
show_legend : bool, default=False

Whether to show legend in plot

-
-
show_plot : bool

Whether to show plot

-
-
close_figs : bool, default=False

Whether to close figures before plotting

-
-
clear_fig : bool, default=True

Whether to clear figures before plotting

-
-
**kwargs : keyword arguments

Keyword arguments for matplotlib.pyplot

-
-
-
-
Returns:
-
-
fig, ax : matplotlib figure and axis objects

Returns matplotlib.pyplot figure and axis objects if return_fig=True; otherwise, simply plots the figures

-
-
-
-
-
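To illustrate the plot_type string grammar described above, here is a hypothetical call; hvsr_results is assumed to be the output of sprit.run() (documented later on this page):

    # 'HVSR p ann' : H/V curve with a dotted line and annotation at the best peak
    # 'C+ p'       : component curves on their own subplot, with the best-peak line
    # 'SPEC ann'   : spectrogram-style plot, annotated with the best peak
    fig, ax = sprit.sprit_hvsr.plot_hvsr(hvsr_results,
                                         plot_type='HVSR p ann C+ p SPEC ann',
                                         plot_engine='matplotlib',
                                         return_fig=True)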
- -
-
-sprit.sprit_hvsr.plot_stream(stream, params, fig=None, axes=None, show_plot=False, ylim_std=0.75, return_fig=True)[source]
-

Function to plot a stream of data with Z, E, N components using matplotlib. Similar to obspy.Stream.plot(), but formatted differently and eventually more customizable. This is also used in various functions throughout the package.

-
-
Parameters:
-
-
stream : obspy.core.stream.Stream

Obspy stream of data with Z, E, N components

-
-
params : HVSRData or HVSRBatch

Data object with parameters relevant for creating plot

-
-
fig : matplotlib.Figure, default=None

Optional: if not None, matplotlib.Figure in which to plot the resulting figure (i.e., can be plotted in existing figure)

-
-
axes : matplotlib.Axis, default=None

Optional: if not None, matplotlib.Axis in which to plot the resulting figure (i.e., can be plotted in existing axis)

-
-
show_plot : bool, default=False

Whether to run matplotlib.pyplot.show(), by default False

-
-
ylim_std : float, default = 0.75

Optional: the standard deviation of the data at which to clip the chart, by default 0.75

-
-
return_fig : bool, default=True

Optional: whether to return the figure, by default True

-
-
-
-
Returns:
-
-
(matplotlib.Figure, matplotlib.Axes)

Tuple containing the figure and axes of the resulting plot, only returned if return_fig = True

-
-
-
-
-
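A minimal sketch of calling plot_stream directly, assuming hvsr_data has been through sprit.fetch_data() so that it carries both a 'stream' attribute and the plotting parameters:

    fig, axes = sprit.sprit_hvsr.plot_stream(hvsr_data['stream'],  # obspy Stream with Z, E, N traces
                                             params=hvsr_data,     # HVSRData object with plot parameters
                                             ylim_std=0.75,        # clip amplitudes at 0.75 standard deviations
                                             show_plot=True,
                                             return_fig=True)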
- -
-
-sprit.sprit_hvsr.process_hvsr(hvsr_data, horizontal_method=None, smooth=True, freq_smooth='konno ohmachi', f_smooth_width=40, resample=True, outlier_curve_rmse_percentile=False, azimuth=None, verbose=False)[source]
-

Process the input data and get HVSR data

-

This is the main function that uses other (private) functions to do the bulk of the processing of the HVSR data and the data quality checks.

-
-
Parameters:
-
-
hvsr_data : HVSRData or HVSRBatch

Data object containing all the parameters input and generated by the user (usually, during sprit.input_params(), sprit.fetch_data(), sprit.generate_ppsds() and/or sprit.remove_noise()).

-
-
horizontal_method : int or str, default=3

Method to use for combining the horizontal components. Default is 3) Geometric Mean.
  0) (not used)
  1) 'Diffuse field assumption'   H = √( (eie_E + eie_N) / eie_Z), eie = equal interval energy
  2) 'Arithmetic Mean'            H ≡ (HN + HE)/2
  3) 'Geometric Mean'             H ≡ √(HN · HE), recommended by the SESAME project (2004)
  4) 'Vector Summation'           H ≡ √(HN^2 + HE^2)
  5) 'Quadratic Mean'             H ≡ √((HN^2 + HE^2)/2)
  6) 'Maximum Horizontal Value'   H ≡ max {HN, HE}
  7) 'Minimum Horizontal Value'   H ≡ min {HN, HE}
  8) 'Single Azimuth'             H = H2·cos(az) + H1·sin(az)
-
smooth : bool or int, default=True

If True, smooths the H/V curve using a Savitzky-Golay (savgol) filter with a window length of 51 (works well with the default resample of 1000 points). If int, the length of the window to use in the savgol filter.

-
-
-
-
freq_smooth : str {'konno ohmachi', 'constant', 'proportional'}
-
Which frequency smoothing method to use. By default, uses the ‘konno ohmachi’ method.
-
-
-

See here for more information: https://www.geopsy.org/documentation/geopsy/hv-processing.html

-
-
f_smooth_width : int, default = 40

  • For 'konno ohmachi': passed directly to the bandwidth parameter of the konno_ohmachi_smoothing() function; determines the width of the smoothing peak, with lower values resulting in a broader peak. Must be > 0.
  • For 'constant': the size of a triangular smoothing window, in number of frequency steps
  • For 'proportional': the size of a triangular smoothing window, as a percentage of the number of frequency steps (e.g., with 1000 frequency steps/bins and f_smooth_width=40, the window would be 400 steps wide)
-
-
resample : bool or int, default = True

If True, resamples the H/V data to include 1000 frequency values for the rest of the analysis. If int, the number of data points to which to interpolate/resample/smooth the component PSD/H-V curve data.

-
-
-
-
outlier_curve_rmse_percentile : bool or float, default = False

If False, outlier curve removal is not carried out here. If True, defaults to 98 (98th percentile). Otherwise, a float of the percentile used as rmse_thresh of remove_outlier_curves().

-
-
azimuth : float, default = None

The azimuth angle to use when method is single azimuth.

-
-
verbose : bool, default=False

Whether to print output to terminal

-
-
-
-
Returns:
-
-
hvsr_out : dict

Dictionary containing all the information about the data, including input parameters

-
-
-
-
-
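A sketch of a typical call, spelling out the documented defaults explicitly (the integer codes for horizontal_method follow the list above):

    hvsr_results = sprit.sprit_hvsr.process_hvsr(hvsr_data,
                                                 horizontal_method=3,          # 3) Geometric Mean (the default)
                                                 freq_smooth='konno ohmachi',  # Konno-Ohmachi frequency smoothing
                                                 f_smooth_width=40,            # bandwidth of the smoothing
                                                 smooth=True,                  # savgol smoothing, window length 51
                                                 resample=1000)                # int: resample to 1000 points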
- -
-
-sprit.sprit_hvsr.read_tromino_files(input_data, params, struct_format='H', sampling_rate=128, start_byte=24576, verbose=False, **kwargs)[source]
-

Function to read data from Tromino instruments. Specifically, this has been lightly tested on Tromino 3G+ machines.

-
-
Parameters:
-
-
input_data : str, pathlib.Path

The input parameter _datapath_ from sprit.input_params()

-
-
params : HVSRData or HVSRBatch

The parameters as read in from input_params() and fetch_data()

-
-
verbose : bool, optional

Whether to print results to terminal, by default False

-
-
-
-
Returns:
-
-
obspy.Stream

An obspy.Stream object containing the trace data from the Tromino instrument

-
-
-
-
-
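A hypothetical sketch (the filename is made up, and params is assumed to be an HVSRData object from sprit.input_params()); the byte-level keyword values shown are simply the signature defaults:

    tromino_stream = sprit.sprit_hvsr.read_tromino_files('TROMINO_DATA.trc',  # hypothetical input file
                                                         params=params,
                                                         struct_format='H',   # struct format: unsigned short samples
                                                         sampling_rate=128,   # Hz
                                                         start_byte=24576,    # offset where trace data begins
                                                         verbose=True)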
- -
-
-sprit.sprit_hvsr.remove_noise(hvsr_data, remove_method=None, processing_window=None, sat_percent=0.995, noise_percent=0.8, sta=2, lta=30, stalta_thresh=[8, 16], warmup_time=0, cooldown_time=0, min_win_size=1, remove_raw_noise=False, show_stalta_plot=False, verbose=False)[source]
-

Function to remove noisy windows from data, using various methods.

-

Methods include:
  • Manual window selection (by clicking on a chart with spectrogram and stream data)
  • Auto window selection, which applies the following in sequence (these can also be done independently):
      • An sta/lta "antitrigger" method (using sta/lta values to automatically remove triggered windows where there appears to be too much noise)
      • A noise threshold method, which cuts off all times where the noise threshold equals more than (by default) 80% of the highest amplitude noise sample for the length specified by lta (in seconds)
      • A saturation threshold method, which cuts off all times where the noise threshold equals more than (by default) 99.5% of the highest amplitude noise sample
-
-
-
Parameters:
-
-
hvsr_data : dict, obspy.Stream, or obspy.Trace

Dictionary containing all the data and parameters for the HVSR analysis

-
-
remove_method : str, {'auto', 'manual', 'stalta'/'antitrigger', 'saturation threshold', 'noise threshold', 'warmup'/'cooldown'/'buffer'/'warm_cool'}

The different methods for removing noise from the dataset. A list of strings will also work, in which case it should be a list of the above strings. See the descriptions above for how each method works. By default 'auto'. If remove_method='auto', this is the equivalent of remove_method=['noise threshold', 'antitrigger', 'saturation threshold', 'warm_cool']

-
-
processing_window : list, tuple, or None

A list/tuple of two items [s, e], or a list/tuple of two-item lists/tuples [[s0, e0], [s1, e1], ... [sn, en]], with start and end time(s) for windows to keep for processing. Data outside of these times will be excluded from processing. Times should be obspy.UTCDateTime objects to ensure precision, but time strings ("13:05") will also work in most cases (exceptions may be when the data stream starts/ends on different UTC days)

-
-
sat_percent : float, default=0.995

Percentage (between 0 and 1) to use as the threshold at which to remove data. This is used in the saturation method. By default 0.995. If a value greater than 1 is passed, it will be divided by 100 to obtain the percentage.

-
-
noise_percent : float, default = 0.8

Percentage (between 0 and 1) to use as the threshold at which to remove data if it persists for longer than the time specified by min_win_size (in seconds). This is used in the noise threshold method. By default 0.8. If a value greater than 1 is passed, it will be divided by 100 to obtain the percentage.

-
-
sta : int, optional

Short term average (STA) window (in seconds), by default 2. For use with sta/lta antitrigger method.

-
-
lta : int, optional

Long term average (LTA) window (in seconds), by default 30. For use with the sta/lta antitrigger method.

-
-
stalta_thresh : list, default=[8, 16]

Two-item list or tuple with the thresholds for the sta/lta antitrigger. The first value (index [0]) is the lower threshold; the second value (index [1]) is the upper threshold. By default [8, 16] (per the function signature).

-
-
warmup_time : int, default=0

Time in seconds to allow for warmup of the instrument (or while the operator is still near the instrument). This will remove any data before this time, by default 0.

-
-
cooldown_time : int, default=0

Time in seconds to allow for cooldown of the instrument (or for when the operator is nearing the instrument). This will remove any data within this time at the end of the record, by default 0.

-
-
min_win_size : float, default=1

The minimum length (in seconds) a window must be over the specified threshold for it to be removed

-
-
remove_raw_noise : bool, default=False

If remove_raw_noise=True, will perform operation on raw data (‘input_stream’), rather than potentially already-modified data (‘stream’).

-
-
verbose : bool, default=False

Whether to print status of remove_noise

-
-
-
-
Returns:
-
-
output : dict

Dictionary similar to hvsr_data, but containing modified data with ‘noise’ removed

-
-
-
-
-
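As a sketch of combining the methods described above (values shown are either documented defaults or purely illustrative):

    hvsr_data = sprit.sprit_hvsr.remove_noise(hvsr_data,
                                              remove_method=['antitrigger', 'saturation threshold'],
                                              sta=2, lta=30,            # sta/lta windows, in seconds
                                              stalta_thresh=[8, 16],    # lower/upper antitrigger thresholds
                                              sat_percent=0.995,        # saturation threshold
                                              warmup_time=60,           # illustrative: trim the first 60 s
                                              verbose=True)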
- -
-
-sprit.sprit_hvsr.remove_outlier_curves(hvsr_data, rmse_thresh=98, use_percentile=True, use_hv_curve=False, plot_engine='matplotlib', show_plot=False, verbose=False)[source]
-

Function used to remove outlier curves, using Root Mean Square Error (RMSE) to calculate the error of each windowed Probabilistic Power Spectral Density (PPSD) curve against the median PPSD value at each frequency step for all times. The RMSE is calculated for the PPSD curves of each component individually; all of a flagged window's curves are removed from analysis.

-

Some aberrant curves often occur due to the remove_noise() function, so this should be run some time after remove_noise(). In general, the recommended workflow is to run this immediately following the generate_ppsds() function.

-
-
Parameters:
-
-
hvsr_data : dict

Input dictionary containing all the values and parameters of interest

-
-
rmse_thresh : float or int, default=98

The Root Mean Square Error value to use as a threshold for determining whether a curve is an outlier. This averages over each entire individual curve so that curves with very aberrant data (which often occur when using the remove_noise() method) can be identified. Otherwise, specify a float or integer to use as the cutoff RMSE value (all curves with RMSE above this will be removed)

-
-
use_percentile : bool, default=True

Whether rmse_thresh should be interpreted as a raw RMSE value or as a percentile of the RMSE values.

-
-
use_hv_curve : bool, default=False

Whether to use the calculated HV Curve or the individual components. This can only be True after process_hvsr() has been run.

-
-
show_plot : bool, default=False

Whether to show a plot of the removed data

-
-
verbose : bool, default=False

Whether to print output of function to terminal

-
-
-
-
Returns:
-
-
hvsr_data : dict

Input dictionary with values modified based on work of function.

-
-
-
-
-
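A minimal sketch, run immediately after generate_ppsds() as recommended above:

    hvsr_data = sprit.sprit_hvsr.remove_outlier_curves(hvsr_data,
                                                       rmse_thresh=98,       # interpreted as the 98th percentile...
                                                       use_percentile=True,  # ...because use_percentile is True
                                                       use_hv_curve=False,   # compare component curves, not the H/V curve
                                                       show_plot=True)       # plot the removed curves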
- -
-
-sprit.sprit_hvsr.run(input_data, source='file', azimuth_calculation=False, noise_removal=False, outlier_curves_removal=False, verbose=False, **kwargs)[source]
-

sprit.run() is the main function that allows you to do all of your HVSR processing in one simple step. (sprit.run() is how you would call it in your code, but it may also be called using sprit.sprit_hvsr.run().)

-

The input_data parameter of sprit.run() is the only required parameter. This can be either a single file, a list of files (one for each component, for example), a directory (in which case, all obspy-readable files will be added to an HVSRBatch instance), a Rasp. Shake raw data directory, or sample data.

-
-
Parameters:
-
-
input_data : str or filepath object that can be read by obspy

Filepath to data to be processed. This may be a file or directory, depending on what kind of data is being processed (this can be specified with the source parameter). For sample data, the following can be specified as the input_data parameter:

-
-
  • Any integer 1-6 (inclusive), or its string equivalent (e.g., input_data="1" or input_data=1 will work)
  • The word "sample" before any integer (e.g., input_data="sample1")
  • The word "sample" by itself, which will default to "sample1" if source='file'
  • If source='batch', input_data should be input_data='sample' or input_data='batch'. In this case, it will read and process all the sample files using the HVSRBatch class. Set verbose=True to see all the information in the sample batch csv file.
-
-
-
source : str, optional

How the input_data parameter should be interpreted (e.g., as a single file, a directory, or a batch of files), by default 'file'

-
-
azimuth_calculation : bool, optional

Whether to perform azimuthal analysis, by default False.

-
-
noise_removal : bool, default=False

Whether to remove noise (before processing PPSDs)

-
-
outlier_curves_removal : bool, default=False

Whether to remove outlier curves from HVSR time windows

-
-
show_plot : bool, default=True

Whether to show plots. This does not affect whether the plots are created (and then inserted as an attribute of HVSRData), only whether they are shown.

-
-
verbose : bool, optional

Whether to print status and results to terminal, by default False

-
-
**kwargs

Keyword arguments for the functions listed above. The keyword arguments are unique, so they will get parsed out and passed into the appropriate function.

-
-
input_params : function name (not an actual parameter)

Function for designating input parameters for reading in and processing data.
See API documentation: [input_params()](https://sprit.readthedocs.io/en/latest/sprit.html#sprit.input_params)

-
-
input_data: any, default = ‘<no default>’

See API documentation at link above or at help(sprit.input_params) for specifics.

-
-
site: any, default = ‘HVSR Site’

See API documentation at link above or at help(sprit.input_params) for specifics.

-
-
id_prefix: any, default = None

See API documentation at link above or at help(input_params) for specifics.

-
-
network: any, default = ‘AM’

See API documentation at link above or at help(sprit.input_params) for specifics.

-
-
station: any, default = ‘RAC84’

See API documentation at link above or at help(sprit.input_params) for specifics.

-
-
loc: any, default = ‘00’

See API documentation at link above or at help(sprit.input_params) for specifics.

-
-
channels: any, default = [‘EHZ’, ‘EHN’, ‘EHE’]

See API documentation at link above or at help(input_params) for specifics.

-
-
acq_date: any, default = ‘2024-10-30’

See API documentation at link above or at help(sprit.input_params) for specifics.

-
-
starttime: any, default = 2024-10-30T00:00:00.000000Z

See API documentation at link above or at help(input_params) for specifics.

-
-
endtime: any, default = 2024-10-30T23:59:59.999999Z

See API documentation at link above or at help(input_params) for specifics.

-
-
tzone: any, default = ‘UTC’

See API documentation at link above or at help(sprit.input_params) for specifics.

-
-
xcoord: any, default = -88.2290526

See API documentation at link above or at help(input_params) for specifics.

-
-
ycoord: any, default = 40.1012122

See API documentation at link above or at help(input_params) for specifics.

-
-
elevation: any, default = 755

See API documentation at link above or at help(input_params) for specifics.

-
-
input_crs: any, default = None

See API documentation at link above or at help(input_params) for specifics.

-
-
output_crs: any, default = None

See API documentation at link above or at help(input_params) for specifics.

-
-
elev_unit: any, default = ‘meters’

See API documentation at link above or at help(sprit.input_params) for specifics.

-
-
depth: any, default = 0

See API documentation at link above or at help(input_params) for specifics.

-
-
instrument: any, default = ‘Raspberry Shake’

See API documentation at link above or at help(sprit.input_params) for specifics.

-
-
metapath: any, default = None

See API documentation at link above or at help(input_params) for specifics.

-
-
hvsr_band: any, default = [0.4, 40]

See API documentation at link above or at help(input_params) for specifics.

-
-
peak_freq_range: any, default = [0.4, 40]

See API documentation at link above or at help(input_params) for specifics.

-
-
processing_parameters: any, default = {}

See API documentation at link above or at help(input_params) for specifics.

-
-
verbose: any, default = False

See API documentation at link above or at help(input_params) for specifics.

-
-
fetch_data : function name (not an actual parameter)

Fetch ambient seismic data from a source to read into an obspy stream.
See API documentation: [fetch_data()](https://sprit.readthedocs.io/en/latest/sprit.html#sprit.fetch_data)

-
-
params: any, default = ‘<output of previous function>’

See API documentation at link above or at help(sprit.fetch_data) for specifics.

-
-
source: any, default = ‘file’

See API documentation at link above or at help(sprit.fetch_data) for specifics.

-
-
data_export_path: any, default = None

See API documentation at link above or at help(fetch_data) for specifics.

-
-
data_export_format: any, default = ‘mseed’

See API documentation at link above or at help(sprit.fetch_data) for specifics.

-
-
detrend: any, default = ‘spline’

See API documentation at link above or at help(sprit.fetch_data) for specifics.

-
-
detrend_order: any, default = 2

See API documentation at link above or at help(fetch_data) for specifics.

-
-
update_metadata: any, default = True

See API documentation at link above or at help(fetch_data) for specifics.

-
-
plot_input_stream: any, default = False

See API documentation at link above or at help(fetch_data) for specifics.

-
-
plot_engine: any, default = ‘matplotlib’

See API documentation at link above or at help(sprit.fetch_data) for specifics.

-
-
show_plot: any, default = True

See API documentation at link above or at help(fetch_data) for specifics.

-
-
verbose: any, default = False

See API documentation at link above or at help(fetch_data) for specifics.

-
-
kwargs: any, default = {}

See API documentation at link above or at help(fetch_data) for specifics.

-
-
calculate_azimuth : function name (not an actual parameter)

Function to calculate azimuthal horizontal component at specified angle(s); adds each new horizontal component to the data.
See API documentation: [calculate_azimuth()](https://sprit.readthedocs.io/en/latest/sprit.html#sprit.calculate_azimuth)

-
-
hvsr_data: any, default = ‘<output of previous function>’

See API documentation at link above or at help(sprit.calculate_azimuth) for specifics.

-
-
azimuth_angle: any, default = 30

See API documentation at link above or at help(calculate_azimuth) for specifics.

-
-
azimuth_type: any, default = ‘multiple’

See API documentation at link above or at help(sprit.calculate_azimuth) for specifics.

-
-
azimuth_unit: any, default = ‘degrees’

See API documentation at link above or at help(sprit.calculate_azimuth) for specifics.

-
-
show_az_plot: any, default = False

See API documentation at link above or at help(calculate_azimuth) for specifics.

-
-
verbose: any, default = False

See API documentation at link above or at help(calculate_azimuth) for specifics.

-
-
plot_azimuth_kwargs: any, default = {}

See API documentation at link above or at help(calculate_azimuth) for specifics.

-
-
remove_noise : function name (not an actual parameter)

Function to remove noisy windows from data, using various methods.
See API documentation: [remove_noise()](https://sprit.readthedocs.io/en/latest/sprit.html#sprit.remove_noise)

-
-
hvsr_data: any, default = ‘<output of previous function>’

See API documentation at link above or at help(sprit.remove_noise) for specifics.

-
-
remove_method: any, default = None

See API documentation at link above or at help(remove_noise) for specifics.

-
-
processing_window: any, default = None

See API documentation at link above or at help(remove_noise) for specifics.

-
-
sat_percent: any, default = 0.995

See API documentation at link above or at help(remove_noise) for specifics.

-
-
noise_percent: any, default = 0.8

See API documentation at link above or at help(remove_noise) for specifics.

-
-
sta: any, default = 2

See API documentation at link above or at help(remove_noise) for specifics.

-
-
lta: any, default = 30

See API documentation at link above or at help(remove_noise) for specifics.

-
-
stalta_thresh: any, default = [8, 16]

See API documentation at link above or at help(remove_noise) for specifics.

-
-
warmup_time: any, default = 0

See API documentation at link above or at help(remove_noise) for specifics.

-
-
cooldown_time: any, default = 0

See API documentation at link above or at help(remove_noise) for specifics.

-
-
min_win_size: any, default = 1

See API documentation at link above or at help(remove_noise) for specifics.

-
-
remove_raw_noise: any, default = False

See API documentation at link above or at help(remove_noise) for specifics.

-
-
show_stalta_plot: any, default = False

See API documentation at link above or at help(remove_noise) for specifics.

-
-
verbose: any, default = False

See API documentation at link above or at help(remove_noise) for specifics.

-
-
generate_ppsds : function name (not an actual parameter)

Generates PPSDs for each channel.
See API documentation: [generate_ppsds()](https://sprit.readthedocs.io/en/latest/sprit.html#sprit.generate_ppsds)

-
-
hvsr_data: any, default = ‘<output of previous function>’

See API documentation at link above or at help(sprit.generate_ppsds) for specifics.

-
-
azimuthal_ppsds: any, default = False

See API documentation at link above or at help(generate_ppsds) for specifics.

-
-
verbose: any, default = False

See API documentation at link above or at help(generate_ppsds) for specifics.

-
-
ppsd_kwargs: any, default = {}

See API documentation at link above or at help(generate_ppsds) for specifics.

-
-
process_hvsr : function name (not an actual parameter)

Process the input data and get HVSR data.
See API documentation: [process_hvsr()](https://sprit.readthedocs.io/en/latest/sprit.html#sprit.process_hvsr)

-
-
hvsr_data: any, default = ‘<output of previous function>’

See API documentation at link above or at help(sprit.process_hvsr) for specifics.

-
-
horizontal_method: any, default = None

See API documentation at link above or at help(process_hvsr) for specifics.

-
-
smooth: any, default = True

See API documentation at link above or at help(process_hvsr) for specifics.

-
-
freq_smooth: any, default = ‘konno ohmachi’

See API documentation at link above or at help(sprit.process_hvsr) for specifics.

-
-
f_smooth_width: any, default = 40

See API documentation at link above or at help(process_hvsr) for specifics.

-
-
resample: any, default = True

See API documentation at link above or at help(process_hvsr) for specifics.

-
-
outlier_curve_rmse_percentile: any, default = False

See API documentation at link above or at help(process_hvsr) for specifics.

-
-
azimuth: any, default = None

See API documentation at link above or at help(process_hvsr) for specifics.

-
-
verbose: any, default = False

See API documentation at link above or at help(process_hvsr) for specifics.

-
-
remove_outlier_curves : function name (not an actual parameter)

Function used to remove outlier curves, using Root Mean Square Error to calculate the error of each windowed PPSD curve.
See API documentation: [remove_outlier_curves()](https://sprit.readthedocs.io/en/latest/sprit.html#sprit.remove_outlier_curves)

-
-
hvsr_data: any, default = ‘<output of previous function>’

See API documentation at link above or at help(sprit.remove_outlier_curves) for specifics.

-
-
rmse_thresh: any, default = 98

See API documentation at link above or at help(remove_outlier_curves) for specifics.

-
-
use_percentile: any, default = True

See API documentation at link above or at help(remove_outlier_curves) for specifics.

-
-
use_hv_curve: any, default = False

See API documentation at link above or at help(remove_outlier_curves) for specifics.

-
-
plot_engine: any, default = ‘matplotlib’

See API documentation at link above or at help(sprit.remove_outlier_curves) for specifics.

-
-
show_plot: any, default = False

See API documentation at link above or at help(remove_outlier_curves) for specifics.

-
-
verbose: any, default = False

See API documentation at link above or at help(remove_outlier_curves) for specifics.

-
-
check_peaks : function name (not an actual parameter)

Function to run tests on HVSR peaks to find the best one and see if it passes quality checks.
See API documentation: [check_peaks()](https://sprit.readthedocs.io/en/latest/sprit.html#sprit.check_peaks)

-
-
hvsr_data: any, default = ‘<output of previous function>’

See API documentation at link above or at help(sprit.check_peaks) for specifics.

-
-
hvsr_band: any, default = [0.4, 40]

See API documentation at link above or at help(check_peaks) for specifics.

-
-
peak_selection: any, default = ‘max’

See API documentation at link above or at help(sprit.check_peaks) for specifics.

-
-
peak_freq_range: any, default = [0.4, 40]

See API documentation at link above or at help(check_peaks) for specifics.

-
-
azimuth: any, default = ‘HV’

See API documentation at link above or at help(sprit.check_peaks) for specifics.

-
-
verbose: any, default = False

See API documentation at link above or at help(check_peaks) for specifics.

-
-
get_report : function name (not an actual parameter)

Generate and/or print and/or export a report of the HVSR analysis in a variety of formats.
See API documentation: [get_report()](https://sprit.readthedocs.io/en/latest/sprit.html#sprit.get_report)

-
-
hvsr_results: any, default = ‘<output of previous function>’

See API documentation at link above or at help(sprit.get_report) for specifics.

-
-
report_formats: any, default = [‘print’, ‘table’, ‘plot’, ‘html’, ‘pdf’]

See API documentation at link above or at help(get_report) for specifics.

-
-
azimuth: any, default = ‘HV’

See API documentation at link above or at help(sprit.get_report) for specifics.

-
-
plot_type: any, default = ‘HVSR p ann C+ p ann Spec p ann’

See API documentation at link above or at help(sprit.get_report) for specifics.

-
-
plot_engine: any, default = ‘matplotlib’

See API documentation at link above or at help(sprit.get_report) for specifics.

-
-
show_print_report: any, default = True

See API documentation at link above or at help(get_report) for specifics.

-
-
show_table_report: any, default = False

See API documentation at link above or at help(get_report) for specifics.

-
-
show_plot_report: any, default = True

See API documentation at link above or at help(get_report) for specifics.

-
-
show_html_report: any, default = False

See API documentation at link above or at help(get_report) for specifics.

-
-
show_pdf_report: any, default = True

See API documentation at link above or at help(get_report) for specifics.

-
-
suppress_report_outputs: any, default = False

See API documentation at link above or at help(get_report) for specifics.

-
-
show_report_outputs: any, default = False

See API documentation at link above or at help(get_report) for specifics.

-
-
csv_handling: any, default = ‘append’

See API documentation at link above or at help(sprit.get_report) for specifics.

-
-
report_export_format: any, default = None

See API documentation at link above or at help(get_report) for specifics.

-
-
report_export_path: any, default = None

See API documentation at link above or at help(get_report) for specifics.

-
-
verbose: any, default = False

See API documentation at link above or at help(get_report) for specifics.

-
-
kwargs: any, default = {}

See API documentation at link above or at help(get_report) for specifics.

-
-
export_data : function name (not an actual parameter)

Export data into a pickle format that can be read back in using import_data(), so data does not need to be reprocessed.
See API documentation: [export_data()](https://sprit.readthedocs.io/en/latest/sprit.html#sprit.export_data)

-
-
hvsr_data: any, default = ‘<output of previous function>’

See API documentation at link above or at help(sprit.export_data) for specifics.

-
-
hvsr_export_path: any, default = None

See API documentation at link above or at help(export_data) for specifics.

-
-
ext: any, default = ‘hvsr’

See API documentation at link above or at help(sprit.export_data) for specifics.

-
-
verbose: any, default = False

See API documentation at link above or at help(export_data) for specifics.

-
-
-
-
Returns:
-
-
hvsr_results : sprit.HVSRData or sprit.HVSRBatch object

If a single file/data point is being processed, an HVSRData object will be returned. Otherwise, an HVSRBatch object will be returned. See their documentation for more information.

-
-
-
-
Raises:
-
-
RuntimeError

If the input parameters cannot be read correctly. This is raised if the input_params() function fails. This raises an error since no other data reading or processing steps will be able to be carried out correctly.

-
-
RuntimeError

If the data is not read/fetched correctly using fetch_data(), an error will be raised. This is raised if the fetch_data() function fails. This raises an error since no other data processing steps will be able to be carried out correctly.

-
-
RuntimeError

If the data being processed is a single file, an error will be raised if generate_ppsds() does not work correctly. No errors are raised for remove_noise() errors (since that is an optional step) or for the process_hvsr() step (since that is the last processing step).

-
-
-
-
-

Notes

-

The sprit.run() function calls the following functions. This is the recommended order/set of functions to run to process HVSR using SpRIT. See the API documentation for these functions for more information:
  • input_params(): The input_data parameter of input_params() is the only required variable, though others may also need to be called for your data to process correctly.
  • fetch_data(): the source parameter of fetch_data() is the only explicit variable in the sprit.run() function aside from input_data and verbose. Everything else gets delivered to the correct function via the kwargs dictionary.
  • remove_noise(): by default, the kind of noise removal is remove_method='auto'. See the remove_noise() documentation for more information. If remove_method is set to anything other than one of the explicit options in remove_noise, noise removal will not be carried out.
  • generate_ppsds(): generates PPSDs for each component, which will be combined/used later. Any parameter of obspy.signal.spectral_estimation.PPSD() may also be read into this function.
  • remove_outlier_curves(): removes any outlier PPSD curves so that the data quality for when curves are combined will be enhanced. See the remove_outlier_curves() documentation for more information.
  • process_hvsr(): this is the main function processing the HVSR curve and statistics. See the process_hvsr() documentation for more details. The hvsr_band parameter sets the frequency spectrum over which these calculations occur.
  • check_peaks(): this is the main function that will find and 'score' peaks to get a best peak. The parameter peak_freq_range can be set to limit the frequencies within which peaks are checked and scored.
  • get_report(): this is the main function that will print, plot, and/or save the results of the data. See the get_report() API documentation for more information.
  • export_data(): this function exports the final data output as a pickle file (by default, this pickle object has a .hvsr extension). This can be used to read data back into SpRIT without having to reprocess data.

-
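Putting the above together, a hypothetical end-to-end call on sample data might look like this (keyword arguments are routed to the underlying functions as described above):

    import sprit

    # Single-site processing: hvsr_band is routed to input_params()/check_peaks(),
    # remove_method to remove_noise(), and so on
    hvsr_results = sprit.run('sample1',
                             noise_removal=True,
                             remove_method='auto',
                             hvsr_band=[0.4, 40],
                             peak_freq_range=[0.4, 40],
                             verbose=True)

    # Batch processing of all the sample files via the HVSRBatch class
    batch_results = sprit.run('sample', source='batch')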
- -
-
-sprit.sprit_hvsr.test_function()[source]
-
- -
- - -
-
- -
-
-
-
- - - - \ No newline at end of file diff --git a/docs/_build/html/sprit.sprit_jupyter_UI.html b/docs/_build/html/sprit.sprit_jupyter_UI.html deleted file mode 100644 index 35808a1c..00000000 --- a/docs/_build/html/sprit.sprit_jupyter_UI.html +++ /dev/null @@ -1,180 +0,0 @@ - - - - - - - sprit.sprit_jupyter_UI module — sprit 1.4 documentation - - - - - - - - - - - - - - - - - -
- - -
- -
-
-
- -
-
-
-
- -
-

sprit.sprit_jupyter_UI module

-

Functions to create jupyter notebook widget UI

-
-
-sprit.sprit_jupyter_UI.create_jupyter_ui()[source]
-
- -
-
-sprit.sprit_jupyter_UI.get_default(func, param)[source]
-
- -
- - -
-
- -
-
-
-
- - - - \ No newline at end of file diff --git a/docs/_build/html/sprit.sprit_plot.html b/docs/_build/html/sprit.sprit_plot.html deleted file mode 100644 index aabd2507..00000000 --- a/docs/_build/html/sprit.sprit_plot.html +++ /dev/null @@ -1,257 +0,0 @@ - - - - - - - sprit.sprit_plot module — sprit 1.4 documentation - - - - - - - - - - - - - - - - - -
- - -
- -
-
-
- -
-
-
-
- -
-

sprit.sprit_plot module

-
-
-sprit.sprit_plot.get_check_peaks_kwargs()[source]
-
- -
-
-sprit.sprit_plot.get_generate_ppsd_kwargs()[source]
-
- -
-
-sprit.sprit_plot.get_get_report_kwargs()[source]
-
- -
-
-sprit.sprit_plot.get_process_hvsr_kwargs()[source]
-
- -
-
-sprit.sprit_plot.get_remove_noise_kwargs()[source]
-
- -
-
-sprit.sprit_plot.get_remove_outlier_curve_kwargs()[source]
-
- -
-
-sprit.sprit_plot.parse_comp_plot_list(hv_data, comp_plot_list, plot_with_hv=False, results_fig=None, azimuth='HV')[source]
-
- -
-
-sprit.sprit_plot.parse_hv_plot_list(hv_data, hvsr_plot_list, results_fig=None, azimuth='HV')[source]
-
- -
-
-sprit.sprit_plot.parse_plot_string(plot_string)[source]
-
- -
-
-sprit.sprit_plot.parse_spec_plot_list(hv_data, spec_plot_list, subplot_num, results_fig=None, azimuth='HV')[source]
-
- -
-
-sprit.sprit_plot.plot_outlier_curves(hvsr_data, plot_engine='plotly', rmse_thresh=0.98, use_percentile=True, use_hv_curve=False, from_roc=False, show_plot=True, verbose=False)[source]
-
- -
-
-sprit.sprit_plot.plot_preview(hv_data, stream=None, preview_fig=None, spectrogram_component='Z', show_plot=True, return_fig=False)[source]
-
- -
-
-sprit.sprit_plot.plot_results(hv_data, plot_string='HVSR p ann C+ p SPEC ann', results_fig=None, results_graph_widget=None, return_fig=False, show_results_plot=True)[source]
-
- -
-
-sprit.sprit_plot.process_data(button)[source]
-
- -
-
-sprit.sprit_plot.read_data(button)[source]
-
- -
- - -
-
- -
-
-
-
- - - - \ No newline at end of file diff --git a/docs/_build/html/sprit.sprit_streamlit_ui.html b/docs/_build/html/sprit.sprit_streamlit_ui.html deleted file mode 100644 index 806688b6..00000000 --- a/docs/_build/html/sprit.sprit_streamlit_ui.html +++ /dev/null @@ -1,209 +0,0 @@ - - - - - - - sprit.sprit_streamlit_ui module — sprit 1.4 documentation - - - - - - - - - - - - - - - - - -
- - -
- -
-
-
- -
-
-
-
- -
-

sprit.sprit_streamlit_ui module

-
-
-sprit.sprit_streamlit_ui.check_if_default()[source]
-
- -
-
-sprit.sprit_streamlit_ui.on_file_upload()[source]
-
- -
-
-sprit.sprit_streamlit_ui.on_run_data()[source]
-
- -
-
-sprit.sprit_streamlit_ui.print_param(key=None, show_type=True)[source]
-
- -
-
-sprit.sprit_streamlit_ui.setup_session_state()[source]
-
- -
-
-sprit.sprit_streamlit_ui.text_change(verbose=False)[source]
-
- -
-
-sprit.sprit_streamlit_ui.write_to_info_tab(info_tab)[source]
-
- -
- - -
-
- -
-
-
-
- - - - \ No newline at end of file diff --git a/docs/_build/html/sprit.sprit_tkinter_ui.html b/docs/_build/html/sprit.sprit_tkinter_ui.html deleted file mode 100644 index 1bbff889..00000000 --- a/docs/_build/html/sprit.sprit_tkinter_ui.html +++ /dev/null @@ -1,237 +0,0 @@ - - - - - - - sprit.sprit_tkinter_ui module — sprit 1.4 documentation - - - - - - - - - - - - - - - - - -
- - -
- -
-
-
- -
-
-
-
- -
-

sprit.sprit_tkinter_ui module

-

This script contains all the functions, classes, etc. to create a tkinter app for the graphical user interface.

-
-
-class sprit.sprit_tkinter_ui.SPRIT_App(master)[source]
-

Bases: object

-

Methods

- - - - - - - - - -

log_errorMsg(logMsg)

manual_label_update()

- - - - - - - - - -

create_menubar

create_tabs

-
-
-create_menubar()[source]
-
- -
-
-create_tabs()[source]
-
- -
-
-log_errorMsg(logMsg)[source]
-
- -
-
-manual_label_update()[source]
-
- -
- -
-
-sprit.sprit_tkinter_ui.catch_errors(func)[source]
-
- -
-
-sprit.sprit_tkinter_ui.on_closing()[source]
-
- -
-
-sprit.sprit_tkinter_ui.reboot_app()[source]
-

Restarts the current program. Note: this function does not return. Any cleanup action (like saving data) must be done before calling this function.

-
- -
- - -
-
- -
-
-
-
- - - - \ No newline at end of file diff --git a/docs/_build/html/sprit.sprit_utils.html b/docs/_build/html/sprit.sprit_utils.html deleted file mode 100644 index 11227bc3..00000000 --- a/docs/_build/html/sprit.sprit_utils.html +++ /dev/null @@ -1,303 +0,0 @@ - - - - - - - sprit.sprit_utils module — sprit 1.4 documentation - - - - - - - - - - - - - - - - -
- - -
- -
-
-
- -
-
-
-
- -
-

sprit.sprit_utils module

-
-
-sprit.sprit_utils.assert_check(var, cond=None, var_type=None, error_message='Output not valid', verbose=False)[source]
-
- -
-
-sprit.sprit_utils.check_gui_requirements()[source]
-
- -
-
-sprit.sprit_utils.check_mark(incolor=False, interminal=False)[source]
-

The default Windows terminal is not able to display the check mark character correctly. This function returns another displayable character if the platform is Windows.

-
- -
-
-sprit.sprit_utils.check_tsteps(hvsr_data)[source]
-

Check time steps of PPSDs to make sure they are all the same length

-
- -
-
-sprit.sprit_utils.check_xvalues(ppsds)[source]
-

Check x_values of PPSDs to make sure they are all the same length

-
- -
-
-sprit.sprit_utils.checkifpath(filepath, sample_list='', verbose=False, raise_error=False)[source]
-

Support function to check whether a filepath is a pathlib.Path object, and to try to convert it if not

-
-
Parameters:
-
-
filepath : str or pathlib.Path, or anything

Filepath to check. If not a valid filepath, it will not be converted and an error is raised

-
-
-
-
Returns:
-
-
filepath : pathlib.Path

pathlib.Path of filepath

-
-
-
-
-
- -
-
-sprit.sprit_utils.format_time(inputDT, tzone='UTC')[source]
-

Private function to format time, used in other functions

-

Formats input time to datetime objects in UTC

-
-
Parameters:
-
-
inputDT : str or datetime obj

Input datetime. Can include date and time, just date (time inferred to be 00:00:00.00) or just time (if so, date is set as today)

-
-
tzone : str {'utc', 'local'} or int, default='utc'

Timezone of data entry.

If a string and not 'utc', assumed to be the timezone of the computer running the process. If an int, assumed to be the offset from UTC (e.g., CST in the United States is -6; CDT in the United States is -5)

-
-
-
-
-
-
Returns:
-
-
outputTimeObj : datetime object in UTC

Output datetime.datetime object, now in UTC time.

-
-
-
-
-
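A short usage sketch of the documented inputs (the exact accepted string formats depend on the implementation, so treat these literals as assumptions):

    # An integer tzone is treated as an offset from UTC (CDT in the United States is -5)
    utc_dt = sprit.sprit_utils.format_time('2023-02-15 17:04:00', tzone=-5)

    # A bare time string: the date is set as today, and 'utc' input is returned in UTC as-is
    utc_dt2 = sprit.sprit_utils.format_time('13:05', tzone='utc')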
- -
-
-sprit.sprit_utils.get_char(in_char)[source]
-

Outputs character with proper encoding/decoding

-
- -
-
-sprit.sprit_utils.has_required_channels(stream)[source]
-
- -
-
-sprit.sprit_utils.make_it_classy(input_data, verbose=False)[source]
-
- -
-
-sprit.sprit_utils.read_from_RS(dest, src='SHAKENAME@HOSTNAME:/opt/data/archive/YEAR/AM/STATION/', opts='az', username='myshake', password='shakeme', hostname='rs.local', year='2023', sta='RAC84', sleep_time=0.1, verbose=True, save_progress=True, method='scp')[source]
-
- -
-
-sprit.sprit_utils.time_it(_t, proc_name='', verbose=True)[source]
-

Computes elapsed time since the last call.

-
- -
-
-sprit.sprit_utils.x_mark(incolor=False, inTerminal=False)[source]
-

The default Windows terminal is not able to display the check mark character correctly. This function returns another displayable character if the platform is Windows

-
- -
- - -
-
- -
-
-
-
- - - - \ No newline at end of file diff --git a/docs/_generate_docs.py b/docs/_generate_docs.py index c60e1c51..44e07a2d 100644 --- a/docs/_generate_docs.py +++ b/docs/_generate_docs.py @@ -16,7 +16,7 @@ # Whether to CONVERT_MD using markdown (True), or github (False) RTD_DOCS = True -GITHUB_PAGES = False # Don't think I need this anymore, and it still works +GITHUB_PAGES = True # Don't think I need this anymore, and it still works CONVERT_MD = True RTD_THEME = False # Not currently working, for github pages diff --git a/docs/_build/html/search.html b/docs/_modules/index.html similarity index 51% rename from docs/_build/html/search.html rename to docs/_modules/index.html index 0e383773..154a31f1 100644 --- a/docs/_build/html/search.html +++ b/docs/_modules/index.html @@ -1,26 +1,23 @@ - + - Search — sprit 1.4 documentation - - + Overview: module code — sprit 1.4 documentation + + - - - - - - - - - + + + + + + @@ -31,11 +28,11 @@ - + sprit
-
+ @@ -44,9 +41,9 @@
@@ -55,15 +52,15 @@
    -
  • - +
  • +
@@ -72,18 +69,15 @@
- - - -
- -
+

All modules for which code is available

+
@@ -109,14 +103,7 @@ jQuery(function () { SphinxRtdTheme.Navigation.enable(true); }); - - - - - - + \ No newline at end of file diff --git a/docs/_modules/sprit/sprit_cli.html b/docs/_modules/sprit/sprit_cli.html new file mode 100644 index 00000000..107d1afb --- /dev/null +++ b/docs/_modules/sprit/sprit_cli.html @@ -0,0 +1,224 @@ + + + + + + sprit.sprit_cli — sprit 1.4 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for sprit.sprit_cli

+"""This module/script is used to run sprit from the command line. 
+
+The arguments here should correspond to any of the keyword arguments that can be used with sprit.run() (or sprit_hvsr.run()). See the run() function's documentation for more information, or the individual functions that are run within it.
+
+For list inputs, you should pass the argument multiple times(e.g., --report_format "csv" --report_format "print" --report_format "plot"). (In the case of --report_format, you can also just use "all" to get csv, print, and plot report types)
+
+The input_data parameter of input_params() is the only required argument, though for your data processing to work correctly and to be formatted correctly, you may need to pass others as well.
+"""
+
+import argparse
+import inspect
+try:
+    import sprit  # When distributed
+except:
+    import sprit_hvsr as sprit #When testing
+
+
+[docs] +def get_param_docstring(func, param_name): + function_docstring = func.__doc__ + + # Search for the parameter's docstring within the function's docstring + param_docstring = None + if function_docstring: + param_start = function_docstring.find(f'{param_name} :') + param_start = param_start + len(f'{param_name} :') + if param_start != -1: + param_end_line1 = function_docstring.find('\n', param_start + 1) + param_end = function_docstring.find('\n', param_end_line1 + 1) + if param_end != -1: + param_docstring = function_docstring[param_start:param_end].strip() + + if param_docstring is None: + param_docstring = '' + return param_docstring
+ + +
+[docs] +def main(): + parser = argparse.ArgumentParser(description='CLI for SPRIT HVSR package (specifically the sprit.run() function)') + + hvsrFunctions = [sprit.input_params, + sprit.fetch_data, + sprit.remove_noise, + sprit.generate_ppsds, + sprit.process_hvsr, + sprit.remove_outlier_curves, + sprit.check_peaks, + sprit.get_report]#, + #sprit.plot_hvsr] + + #Get default parameters from main functions + parameters = [] + for f in hvsrFunctions: + parameters.append(inspect.signature(f).parameters) + #Add argument and options to the parser + intermediate_params_list = ['params', 'input', 'hvsr_data', 'hvsr_results'] + paramNamesList = [] + for i, param in enumerate(parameters): + for name, parameter in param.items(): + # Add arguments and options here + if name not in paramNamesList and name not in intermediate_params_list: + paramNamesList.append(name) + curr_doc_str = get_param_docstring(func=hvsrFunctions[i], param_name=name) + if name == 'input_data': + parser.add_argument(name, help=f'{curr_doc_str}') + elif name == 'verbose': + parser.add_argument('-v', '--verbose', action='store_true', help='Print status and results to terminal.', default=parameter.default) + else: + helpStr = f'Keyword argument {name} in function sprit.{hvsrFunctions[i].__name__}(). default={parameter.default}.\n\t{curr_doc_str}' + parser.add_argument(F'--{name}', help=helpStr, default=parameter.default) + + # Add more arguments/options as needed + args = parser.parse_args() + + # Map command-line arguments/options to kwargs + kwargs = {} + for arg_name, arg_value in vars(args).items(): + if isinstance(arg_value, str): + if "=" in arg_value: + arg_value = {arg_value.split('=')[0]: arg_value.split('=')[1]} + + if arg_value.lower()=='true': + arg_value = True + elif arg_value.lower()=='false': + arg_value = False + elif arg_value.lower() == 'none': + arg_value = None + elif "[" in arg_value: + arg_value = arg_value.replace('[', '').replace(']','') + arg_value = arg_value.split(',') + elif "," in arg_value: + arg_value = arg_value.split(',') + kwargs[arg_name] = arg_value + + # Call the sprit.run function with the generated kwargs + kwargs['input_data'] = kwargs['input_data'].replace("'", "") #Remove single quotes to reduce errors + if str(kwargs['input_data']).lower()=='gui': + sprit.gui() + else: + #Print a summary if not verbose + if not kwargs['verbose']: + print("Running sprit.run() with the following arguments (use --verbose for more information):") + print("sprit.run(", end='') + for key, value in kwargs.items(): + if 'kwargs' in str(key): + pass + else: + if type(value) is str: + print(f"{key}='{value}'",end=', ') + else: + print(f"{key}={value}",end=', ') + print('**ppsd_kwargs, **kwargs', end='') + print(')') + + sprit.run(**kwargs)
+ + +if __name__ == '__main__': + main() +
+ +
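As a usage sketch of the argument mapping above: flag names mirror the keyword arguments of the wrapped functions, and main() reads sys.argv through argparse, so the CLI can also be driven programmatically (the flag values here are illustrative):

    import sys
    import sprit.sprit_cli as sprit_cli

    # Equivalent to running the CLI as: <entry-point> sample1 --report_formats print -v
    sys.argv = ['sprit', 'sample1', '--report_formats', 'print', '--verbose']
    sprit_cli.main()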
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_modules/sprit/sprit_hvsr.html b/docs/_modules/sprit/sprit_hvsr.html new file mode 100644 index 00000000..f7b3a7fe --- /dev/null +++ b/docs/_modules/sprit/sprit_hvsr.html @@ -0,0 +1,10021 @@ + + + + + + sprit.sprit_hvsr — sprit 1.4 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for sprit.sprit_hvsr

+"""
+This module is the main SpRIT module that contains all the functions needed to run HVSR analysis.
+
+The functions defined here are read both by the SpRIT graphical user interface and by the command-line interface to run HVSR analysis on input data.
+
+See documentation for individual functions for more information.
+"""
+import base64
+import copy
+import datetime
+import inspect
+import io
+import json
+import math
+import operator
+import os
+import pathlib
+import pickle
+import pkg_resources
+import struct
+import sys
+import tempfile
+import traceback
+import warnings
+import xml.etree.ElementTree as ET
+
+import matplotlib
+from matplotlib.backend_bases import MouseButton
+import matplotlib.dates as mdates
+import matplotlib.pyplot as plt
+import numpy as np
+import obspy
+from obspy.signal import PPSD
+import pandas as pd
+import plotly
+from pyproj import CRS, Transformer
+import scipy
+
+try:  # For distribution
+    from sprit import sprit_utils
+    from sprit import sprit_tkinter_ui
+    from sprit import sprit_jupyter_UI
+    from sprit import sprit_plot
+except Exception:  # For testing
+    import sprit_utils
+    import sprit_tkinter_ui
+    import sprit_jupyter_UI
+    import sprit_plot
+
+NOWTIME = datetime.datetime.now()
+global spritApp
+
+# Main variables
+greek_chars = {'sigma': u'\u03C3', 'epsilon': u'\u03B5', 'teta': u'\u03B8'}
+channel_order = {'Z': 0, '1': 1, 'N': 1, '2': 2, 'E': 2}
+separator_character = '='
+obspyFormats =  ['AH', 'ALSEP_PSE', 'ALSEP_WTH', 'ALSEP_WTN', 'CSS', 'DMX', 'GCF', 'GSE1', 'GSE2', 'KINEMETRICS_EVT', 'KNET', 'MSEED', 'NNSA_KB_CORE', 'PDAS', 'PICKLE', 'Q', 'REFTEK130', 'RG16', 'SAC', 'SACXY', 'SEG2', 'SEGY', 'SEISAN', 'SH_ASC', 'SLIST', 'SU', 'TSPAIR', 'WAV', 'WIN', 'Y']
+
+t0 = datetime.datetime.now().time()
+max_rank = 0
+plotRows = 4
+
+# Get the main resources directory path, and the other paths as well
+resource_dir = pathlib.Path(pkg_resources.resource_filename(__name__, 'resources/'))
+sample_data_dir = resource_dir.joinpath('sample_data')
+settings_dir = resource_dir.joinpath('settings')
+
+sampleFileKeyMap = {'1':sample_data_dir.joinpath('SampleHVSRSite1_AM.RAC84.00.2023.046_2023-02-15_1704-1734.MSEED'),
+                    '2':sample_data_dir.joinpath('SampleHVSRSite2_AM.RAC84.00.2023-02-15_2132-2200.MSEED'),
+                    '3':sample_data_dir.joinpath('SampleHVSRSite3_AM.RAC84.00.2023.199_2023-07-18_1432-1455.MSEED'),
+                    '4':sample_data_dir.joinpath('SampleHVSRSite4_AM.RAC84.00.2023.199_2023-07-18_1609-1629.MSEED'),
+                    '5':sample_data_dir.joinpath('SampleHVSRSite5_AM.RAC84.00.2023.199_2023-07-18_2039-2100.MSEED'),
+                    '6':sample_data_dir.joinpath('SampleHVSRSite6_AM.RAC84.00.2023.192_2023-07-11_1510-1528.MSEED'),
+                    '7':sample_data_dir.joinpath('SampleHVSRSite7_BNE_4_AM.RAC84.00.2023.191_2023-07-10_2237-2259.MSEED'),
+                    '8':sample_data_dir.joinpath('SampleHVSRSite8_BNE_6_AM.RAC84.00.2023.191_2023-07-10_1806-1825.MSEED'),
+                    '9':sample_data_dir.joinpath('SampleHVSRSite9_BNE-2_AM.RAC84.00.2023.192_2023-07-11_0000-0011.MSEED'),
+                    '10':sample_data_dir.joinpath('SampleHVSRSite10_BNE_4_AM.RAC84.00.2023.191_2023-07-10_2237-2259.MSEED'),
+                    
+                    'sample1':sample_data_dir.joinpath('SampleHVSRSite1_AM.RAC84.00.2023.046_2023-02-15_1704-1734.MSEED'),
+                    'sample2':sample_data_dir.joinpath('SampleHVSRSite2_AM.RAC84.00.2023-02-15_2132-2200.MSEED'),
+                    'sample3':sample_data_dir.joinpath('SampleHVSRSite3_AM.RAC84.00.2023.199_2023-07-18_1432-1455.MSEED'),
+                    'sample4':sample_data_dir.joinpath('SampleHVSRSite4_AM.RAC84.00.2023.199_2023-07-18_1609-1629.MSEED'),
+                    'sample5':sample_data_dir.joinpath('SampleHVSRSite5_AM.RAC84.00.2023.199_2023-07-18_2039-2100.MSEED'),
+                    'sample6':sample_data_dir.joinpath('SampleHVSRSite6_AM.RAC84.00.2023.192_2023-07-11_1510-1528.MSEED'),
+                    'sample7':sample_data_dir.joinpath('SampleHVSRSite7_BNE_4_AM.RAC84.00.2023.191_2023-07-10_2237-2259.MSEED'),
+                    'sample8':sample_data_dir.joinpath('SampleHVSRSite8_BNE_6_AM.RAC84.00.2023.191_2023-07-10_1806-1825.MSEED'),
+                    'sample9':sample_data_dir.joinpath('SampleHVSRSite9_BNE-2_AM.RAC84.00.2023.192_2023-07-11_0000-0011.MSEED'),
+                    'sample10':sample_data_dir.joinpath('SampleHVSRSite10_BNE_4_AM.RAC84.00.2023.191_2023-07-10_2237-2259.MSEED'),
+
+                    'sample_1':sample_data_dir.joinpath('SampleHVSRSite1_AM.RAC84.00.2023.046_2023-02-15_1704-1734.MSEED'),
+                    'sample_2':sample_data_dir.joinpath('SampleHVSRSite2_AM.RAC84.00.2023-02-15_2132-2200.MSEED'),
+                    'sample_3':sample_data_dir.joinpath('SampleHVSRSite3_AM.RAC84.00.2023.199_2023-07-18_1432-1455.MSEED'),
+                    'sample_4':sample_data_dir.joinpath('SampleHVSRSite4_AM.RAC84.00.2023.199_2023-07-18_1609-1629.MSEED'),
+                    'sample_5':sample_data_dir.joinpath('SampleHVSRSite5_AM.RAC84.00.2023.199_2023-07-18_2039-2100.MSEED'),
+                    'sample_6':sample_data_dir.joinpath('SampleHVSRSite6_AM.RAC84.00.2023.192_2023-07-11_1510-1528.MSEED'),
+                    'sample_7':sample_data_dir.joinpath('SampleHVSRSite7_BNE_4_AM.RAC84.00.2023.191_2023-07-10_2237-2259.MSEED'),
+                    'sample_8':sample_data_dir.joinpath('SampleHVSRSite8_BNE_6_AM.RAC84.00.2023.191_2023-07-10_1806-1825.MSEED'),
+                    'sample_9':sample_data_dir.joinpath('SampleHVSRSite9_BNE-2_AM.RAC84.00.2023.192_2023-07-11_0000-0011.MSEED'),
+                    'sample_10':sample_data_dir.joinpath('SampleHVSRSite10_BNE_4_AM.RAC84.00.2023.191_2023-07-10_2237-2259.MSEED'),
+                    
+                    'batch':sample_data_dir.joinpath('Batch_SampleData.csv')}
+
+sampleListNos = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10']
+SAMPLE_LIST = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'batch', 'sample', 'sample_batch']
+for s in sampleListNos:
+    SAMPLE_LIST.append(f'sample{s}')
+    SAMPLE_LIST.append(f'sample_{s}')
+
+# plt.rcParams['figure.figsize'] = (8,5.25)
+# plt.rcParams['figure.dpi'] = 500
+
+# CLASSES
+
+# Check if the data is already the right class
+# Define a decorator that wraps the __init__ method
+
+[docs] +def check_instance(init): + def wrapper(self, *args, **kwargs): + # Check if the first argument is an instance of self.__class__ + if args and isinstance(args[0], self.__class__): + # Copy its attributes to self + self.__dict__.update(args[0].__dict__) + else: + # Call the original __init__ method + init(self, *args, **kwargs) + return wrapper
+ + + +# Class for batch data +
+[docs] +class HVSRBatch: + """HVSRBatch is the data container used for batch processing. + It contains several HVSRData objects (one for each site). + These can be accessed using their site name, + either square brackets (HVSRBatchVariable["SiteName"]) or the dot (HVSRBatchVariable.SiteName) accessor. + + The dot accessor may not work if there is a space in the site name. + + All of the functions in the sprit package are designed to perform the bulk of their operations iteratively + on the individual HVSRData objects contained in the HVSRBatch object, and do little with the HVSRBatch object itself, + besides using it determine which sites are contained within it. + + """ + @check_instance + def __init__(self, batch_dict): + """HVSR Batch initializer + + Parameters + ---------- + batch_dict : dict + Dictionary containing Key value pairs with {sitename: HVSRData object} + """ + self._batch_dict = batch_dict + self.batch_dict = self._batch_dict + self.batch = True + + for sitename, hvsrdata in batch_dict.items(): + setattr(self, sitename, hvsrdata) + self[sitename]['batch'] = True + self.sites = list(self._batch_dict.keys()) + + + #METHODS + def __to_json(self, filepath): + """Not yet implemented, but may allow import/export to json files in the future, rather than just .hvsr pickles + + Parameters + ---------- + filepath : filepath object + Location to save HVSRBatch object as json + """ + # open the file with the given filepath + with open(filepath, 'w') as f: + # dump the JSON string to the file + json.dump(self, f, default=lambda o: o.__dict__, sort_keys=True, indent=4) + +
+[docs] + def export(self, hvsr_export_path=True, ext='hvsr'): + """Method to export HVSRData objects in HVSRBatch container to indivdual .hvsr pickle files. + + Parameters + ---------- + hvsr_export_path : filepath, default=True + Filepath to save file. Can be either directory (which will assign a filename based on the HVSRData attributes). By default True. If True, it will first try to save each file to the same directory as input_data, then if that does not work, to the current working directory, then to the user's home directory, by default True + ext : str, optional + The extension to use for the output, by default 'hvsr'. This is still a pickle file that can be read with pickle.load(), but will have .hvsr extension. + """ + export_data(hvsr_data=self, hvsr_export_path=hvsr_export_path, ext=ext)
+ + +
+[docs] + def keys(self): + """Method to return the "keys" of the HVSRBatch object. For HVSRBatch objects, these are the site names. Functions similar to dict.keys(). + + Returns + ------- + dict_keys + A dict_keys object listing the site names of each of the HVSRData objects contained in the HVSRBatch object + """ + return self.batch_dict.keys()
+ + +
+[docs]
+    def items(self):
+        """Method to return both the site names and the HVSRData objects as a set of dict_items tuples. Functions similar to dict.items().
+
+        Returns
+        -------
+        dict_items
+            A dict_items object with the site names as keys and the corresponding HVSRData objects as values
+        """
+        return self.batch_dict.items()
+ + +
+[docs]
+    def copy(self, type='shallow'):
+        """Make a copy of the HVSRBatch object. Uses python copy module.
+
+        Parameters
+        ----------
+        type : str {'shallow', 'deep'}
+            Based on input, creates either a shallow or deep copy of the HVSRBatch object. Shallow is equivalent to copy.copy(). Input of 'deep' is equivalent to copy.deepcopy() (still experimental). Defaults to shallow.
+
+        """
+        if type.lower() == 'deep':
+            return HVSRBatch(copy.deepcopy(self._batch_dict))
+        else:
+            return HVSRBatch(copy.copy(self._batch_dict))
+ + + #Method wrapper of sprit.plot_hvsr function +
+[docs]
+    def plot(self, **kwargs):
+        """Method to plot data, based on the sprit.plot_hvsr() function.
+
+        All the same kwargs and default values apply as plot_hvsr().
+        If return_fig=True, the figure for each site is also stored in that site's 'Plot_Report' attribute.
+
+        Returns
+        -------
+        HVSRBatch
+            The same HVSRBatch object, with each HVSRData object's 'Plot_Report' attribute updated if return_fig=True
+
+        See Also
+        --------
+        plot_hvsr
+        """
+        for sitename in self:
+            if 'return_fig' in kwargs.keys() and kwargs['return_fig']:
+                self[sitename]['Plot_Report'] = plot_hvsr(self[sitename], **kwargs)
+            else:
+                plot_hvsr(self[sitename], **kwargs)
+
+        return self
+ + +
+[docs]
+    def get_report(self, **kwargs):
+        """Method to get report from processed data, in print, graphical, or tabular format.
+
+        Returns
+        -------
+        Variable
+            May return nothing, pandas.DataFrame, or pyplot Figure, depending on input.
+
+        See Also
+        --------
+        get_report
+        """
+        if 'report_formats' in kwargs.keys():
+            if 'table' == kwargs['report_formats']:
+                rowList = []
+                for sitename in self:
+                    rowList.append(get_report(self[sitename], **kwargs))
+                return pd.concat(rowList, ignore_index=True)
+            elif 'plot' == kwargs['report_formats']:
+                plotDict = {}
+                for sitename in self:
+                    if 'return_fig' in kwargs.keys() and kwargs['return_fig']:
+                        plotDict[sitename] = get_report(self[sitename], **kwargs)
+                    else:
+                        get_report(self[sitename], **kwargs)
+                return plotDict
+
+        # The only report_formats left is 'print', which doesn't return anything, so it doesn't matter if it is the default or not
+        for sitename in self:
+            get_report(self[sitename], **kwargs)
+        return
+ + +
+[docs] + def report(self, **kwargs): + """Wrapper of get_report() + + See Also + -------- + get_report + """ + return self.get_report(**kwargs)
+ + +
+[docs]
+    def export_settings(self, site_name=None, export_settings_path='default', export_settings_type='all', include_location=False, verbose=True):
+        """Method to export settings from an HVSRData object in the HVSRBatch object.
+
+        Simply calls sprit.export_settings() from the specified HVSRData object in the HVSRBatch object.
+        See sprit.export_settings() for more details.
+
+        Parameters
+        ----------
+        site_name : str, default=None
+            The name of the site whose settings should be exported. If None, will default to the first site.
+        export_settings_path : str, optional
+            Filepath to output file. If left as 'default', will save as the default value in the resources directory. If that is not possible, will save to home directory, by default 'default'
+        export_settings_type : str, {'all', 'instrument', 'processing'}, optional
+            The type of settings to save, by default 'all'
+        include_location : bool, optional
+            Whether to include the location information in the instrument settings, if that settings type is selected, by default False
+        verbose : bool, optional
+            Whether to print output (filepath and settings) to terminal, by default True
+
+        See Also
+        --------
+        export_settings
+        """
+        # If no site name selected, use first site
+        if site_name is None:
+            site_name = self.sites[0]
+
+        export_settings(hvsr_data=self[site_name],
+                        export_settings_path=export_settings_path, export_settings_type=export_settings_type, include_location=include_location, verbose=verbose)
+ + + def __iter__(self): + return iter(self._batch_dict.keys()) + + def __setitem__(self, key, value): + setattr(self, key, value) + + def __getitem__(self, key): + return getattr(self, key)
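+
+# Usage sketch (illustrative only): accessing sites in an HVSRBatch. The batch object
+# and the site name 'TestSite' below are hypothetical.
+#
+#     batch = batch_data_read('Batch_SampleData.csv')   # or sprit.run(..., source='batch')
+#     print(batch.sites)          # list of site names
+#     site = batch['TestSite']    # bracket accessor returns that site's HVSRData object
+#     same_site = batch.TestSite  # dot accessor (only works if the site name has no spaces)
+#     for sitename in batch:      # iterating yields site names, as with a dict
+#         print(sitename, type(batch[sitename]))
+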
+ + + +# Class for each HVSR site +
+[docs]
+class HVSRData:
+    """HVSRData is the basic data class of the sprit package.
+    It contains all the processed data, input parameters, and reports.
+
+    These attributes and objects can be accessed using square brackets or the dot accessor. For example, to access the site name, HVSRData['site'] and HVSRData.site will both return the site name.
+
+    Some of the methods that work on the HVSRData object (e.g., .plot() and .get_report()) are essentially wrappers for some of the main sprit package functions (sprit.plot_hvsr() and sprit.get_report(), respectively)
+    """
+    @check_instance
+    def __init__(self, params):
+        self.params = params
+        #self.datastream = None
+        self.batch = False
+        #self.tsteps_used = []
+
+        for key, value in params.items():
+            setattr(self, key, value)
+            if key == 'input_params':
+                for k, v in params[key].items():
+                    setattr(self, k, v)
+
+    def __setitem__(self, key, value):
+        setattr(self, key, value)
+
+    def __getitem__(self, key):
+        return getattr(self, key)
+
+    def __to_json(self, filepath):
+        """Not yet supported, will export HVSRData object to json"""
+        def unseriable_fun(o):
+            # Convert numpy arrays to lists so they can be serialized
+            if isinstance(o, np.ndarray):
+                return o.tolist()
+            # Otherwise fall back to the object's __dict__, or dir(o) if it has none
+            try:
+                output = o.__dict__
+            except AttributeError:
+                output = dir(o)
+            return output
+
+        # open the file with the given filepath
+        with open(filepath, 'w') as f:
+            # dump the JSON string to the file
+            json.dump(self, f, default=unseriable_fun, sort_keys=True, indent=4)
+
+[docs]
+    def export(self, hvsr_export_path=None, ext='hvsr'):
+        """Method to export HVSRData objects to .hvsr pickle files.
+
+        Parameters
+        ----------
+        hvsr_export_path : filepath or None, default=None
+            Filepath to which to save the file. May be a directory, in which case a filename will be assigned based on the HVSRData attributes.
+            If None, a save location is determined automatically: first the same directory as input_data, then, if that does not work, the current working directory, then the user's home directory.
+        ext : str, optional
+            The extension to use for the output, by default 'hvsr'. This is still a pickle file that can be read with pickle.load(), but will have the .hvsr extension.
+        """
+        export_data(hvsr_data=self, hvsr_export_path=hvsr_export_path, ext=ext)
+ + + # METHODS (many reflect dictionary methods) +
+[docs]
+    def keys(self):
+        """Method to return the "keys" of the HVSRData object. For HVSRData objects, these are the attributes and parameters of the object. Functions similar to dict.keys().
+
+        Returns
+        -------
+        list
+            A list of the HVSRData object's attributes, parameters, etc.
+        """
+        keyList = []
+        for k in dir(self):
+            if not k.startswith('_'):
+                keyList.append(k)
+        return keyList
+ + +
+[docs]
+    def items(self):
+        """Method to return the "items" of the HVSRData object. For HVSRData objects, this is a dict_items object with the keys and values in tuples. Functions similar to dict.items().
+
+        Returns
+        -------
+        dict_items
+            A dict_items object of the HVSRData object's attributes, parameters, etc.
+        """
+        return self.params.items()
+ + +
+[docs]
+    def copy(self, type='shallow'):
+        """Make a copy of the HVSRData object. Uses python copy module.
+
+        Parameters
+        ----------
+        type : str {'shallow', 'deep'}
+            Based on input, creates either a shallow or deep copy of the HVSRData object. Shallow is equivalent to copy.copy(). Input of type='deep' is equivalent to copy.deepcopy() (still experimental). Defaults to shallow.
+
+        """
+        if type.lower() == 'deep':
+            return HVSRData(copy.deepcopy(self.params))
+        else:
+            return HVSRData(copy.copy(self.params))
+ + +
+[docs]
+    def plot(self, **kwargs):
+        """Method to plot data, wrapper of sprit.plot_hvsr()
+
+        Returns
+        -------
+        matplotlib.figure.Figure and matplotlib.axes.Axes (if return_fig=True)
+
+        See Also
+        --------
+        plot_hvsr
+        plot_azimuth
+        """
+        if 'close_figs' not in kwargs.keys():
+            kwargs['close_figs'] = True
+        plot_return = plot_hvsr(self, **kwargs)
+        plt.show()
+        return plot_return
+ + +
+[docs]
+    def get_report(self, **kwargs):
+        """Method to get report from processed data, in print, graphical, or tabular format.
+
+        Returns
+        -------
+        Variable
+            May return nothing, pandas.DataFrame, or pyplot Figure, depending on input.
+
+        See Also
+        --------
+        get_report
+        """
+        report_return = get_report(self, **kwargs)
+        return report_return
+ + +
+[docs] + def report(self, **kwargs): + """Wrapper of get_report() + + See Also + -------- + get_report + """ + report_return = get_report(self, **kwargs) + return report_return
+ + +
+[docs]
+    def export_settings(self, export_settings_path='default', export_settings_type='all', include_location=False, verbose=True):
+        """Method to export settings from HVSRData object. Simply calls sprit.export_settings() from the HVSRData object. See sprit.export_settings() for more details.
+
+        Parameters
+        ----------
+        export_settings_path : str, optional
+            Filepath to output file. If left as 'default', will save as the default value in the resources directory. If that is not possible, will save to home directory, by default 'default'
+        export_settings_type : str, {'all', 'instrument', 'processing'}, optional
+            The type of settings to save, by default 'all'
+        include_location : bool, optional
+            Whether to include the location information in the instrument settings, if that settings type is selected, by default False
+        verbose : bool, optional
+            Whether to print output (filepath and settings) to terminal, by default True
+        """
+        export_settings(hvsr_data=self,
+                        export_settings_path=export_settings_path, export_settings_type=export_settings_type, include_location=include_location, verbose=verbose)
+
+
+    # ATTRIBUTES
+    # params
+    @property
+    def params(self):
+        """Dictionary containing the parameters used to process the data
+
+        Returns
+        -------
+        dict
+            Dictionary containing the process parameters
+        """
+        return self._params
+
+    @params.setter
+    def params(self, value):
+        if not (isinstance(value, dict)):
+            raise ValueError("params must be a dict type, currently passing {} type.".format(type(value)))
+        self._params = value
+
+    # datastream
+    @property
+    def datastream(self):
+        """A copy of the original obspy datastream read in. This helps to retain the original data even after processing is carried out.
+
+        Returns
+        -------
+        obspy.core.stream.Stream
+            Obspy stream
+        """
+        return self._datastream
+
+    @datastream.setter
+    def datastream(self, value):
+        if value is not None and (not isinstance(value, obspy.core.stream.Stream)):
+            raise ValueError("datastream must be an obspy Stream.")
+        self._datastream = value
+
+    # batch
+    @property
+    def batch(self):
+        """Whether this HVSRData object is part of an HVSRBatch object. This is used throughout the code to help direct the object into the proper processing pipeline.
+
+        Returns
+        -------
+        bool
+            True if HVSRData object is part of HVSRBatch object, otherwise, False
+        """
+        return self._batch
+
+    @batch.setter
+    def batch(self, value):
+        if value == 0:
+            value = False
+        elif value == 1:
+            value = True
+        else:
+            value = None
+        if not isinstance(value, bool):
+            raise ValueError("batch must be boolean type")
+        self._batch = value
+
+    # PPSD object from obspy (static)
+    @property
+    def ppsds_obspy(self):
+        """The original ppsd information from the obspy.signal.spectral_estimation.PPSD(), so as to keep the original if the copy is manipulated/changed."""
+        return self._ppsds_obspy
+
+    @ppsds_obspy.setter
+    def ppsds_obspy(self, value):
+        """Checks whether the ppsd_obspy is of the proper type before saving as attribute"""
+        if not isinstance(value, obspy.signal.spectral_estimation.PPSD):
+            if not isinstance(value, dict):
+                raise ValueError("ppsds_obspy must be obspy.PPSD or dict with obspy.PPSDs")
+            else:
+                for key in value.keys():
+                    if not isinstance(value[key], obspy.signal.spectral_estimation.PPSD):
+                        raise ValueError("ppsds_obspy must be obspy.PPSD or dict with obspy.PPSDs")
+        self._ppsds_obspy = value
+
+    # PPSD dict, copied from obspy ppsds (dynamic)
+    @property
+    def ppsds(self):
+        """Dictionary copy of the class object obspy.signal.spectral_estimation.PPSD(). The dictionary copy allows manipulation of the data in PPSD, whereas that data cannot be easily manipulated in the original Obspy object.
+
+        Returns
+        -------
+        dict
+            Dictionary copy of the PPSD information from generate_ppsds()
+        """
+        return self._ppsds
+
+    @ppsds.setter
+    def ppsds(self, value):
+        if not isinstance(value, dict):
+            raise ValueError("ppsds must be a dict with information from obspy.PPSD (created by sprit.generate_ppsds())")
+        self._ppsds = value
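+
+# Usage sketch (illustrative only): HVSRData attributes can be reached with either
+# accessor style; the export path below is hypothetical.
+#
+#     hvsr_data = run('sample1')
+#     print(hvsr_data['site'])          # bracket accessor
+#     print(hvsr_data.site)             # dot accessor, equivalent
+#     hvsr_data.plot()                  # wrapper around sprit.plot_hvsr()
+#     hvsr_data.export('results.hvsr')  # pickle file with .hvsr extension
+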
+ + + +
+[docs] +def gui_test(): + import subprocess + print(sprit_tkinter_ui.__file__) + guiFile = sprit_tkinter_ui.__file__ + subprocess.call(guiFile, shell=True)
+ + + +# Launch the tkinter gui + +
+[docs]
+def gui(kind='browser'):
+    """Function to open a graphical user interface (gui)
+
+    Parameters
+    ----------
+    kind : str, optional
+        What type of gui to open, by default 'browser'.
+        "browser" opens the Streamlit-based interface in a web browser,
+        "windowed" opens the regular tkinter windowed interface,
+        "widget" opens the jupyter widget interface,
+        "lite" opens the lite interface (pending update).
+
+    """
+    browserList = ['browser', 'remi', 'default', 'd']
+    windowList = ['windowed', 'window', 'qt', 'tkinter', 'tk']
+    widgetList = ['widget', 'jupyter', 'notebook', 'w', 'nb']
+    liteList = ['lite', 'light', 'basic', 'l', 'b']
+
+    if kind.lower() in browserList:
+        import subprocess
+        import sys
+        import tempfile
+
+        import pkg_resources
+        from streamlit.web import cli as stcli
+        import streamlit
+
+        streamlitPath = pathlib.Path(__file__).parent.joinpath("sprit_streamlit_ui.py")
+        cmd = ['streamlit', 'run', streamlitPath.as_posix()]
+        #subprocess.run(cmd)
+
+        def run_streamlit_app(path_dir):
+            # create a temporary directory
+            temp_dir = tempfile.TemporaryDirectory()
+            fpathList = ['sprit_hvsr.py', 'sprit_tkinter_ui.py', 'sprit_jupyter_ui.py', 'sprit_utils.py', 'sprit_plot.py', '__init__.py', 'sprit_streamlit_ui.py']
+            currDir = os.path.dirname(os.path.abspath(__file__))
+            for fpath in fpathList:
+                temp_file_path = os.path.join(temp_dir.name, fpath)
+                with open(pathlib.Path(currDir).joinpath(fpath), 'r') as cf:
+                    scriptText = cf.read()
+                # write the streamlit app code to a Python script in the temporary directory
+                with open(temp_file_path, 'w') as f:
+                    f.write(scriptText)
+
+            try:
+                # execute the streamlit app
+                subprocess.run(
+                    ['streamlit', "run", temp_file_path],
+                    stderr=subprocess.DEVNULL
+                )
+
+            except KeyboardInterrupt:
+                pass
+            # clean up the temporary directory when done
+            temp_dir.cleanup()
+
+        #with open(streamlitPath.parent.as_posix(), 'r') as file:
+        #    appText = file.read()
+
+        #installed_packages = pkg_resources.working_set
+        #for package in installed_packages:
+        #    print(f"{package.key}=={package.version}")
+
+        run_streamlit_app(pathlib.Path(__name__).parent)
+
+        #streamlit.web.bootstrap.run(streamlitPath.as_posix(), '', [], [])
+        #process = subprocess.Popen(["streamlit", "run", os.path.join(
+        #    'application', 'main', 'services', 'streamlit_app.py')])
+
+    elif kind.lower() in windowList:
+        import pkg_resources
+        #guiPath = pathlib.Path(os.path.realpath(__file__))
+        from sprit.sprit_tkinter_ui import SPRIT_App
+
+        try:
+            import tkinter as tk
+        except ImportError:
+            if sys.platform == 'linux':
+                raise ImportError('The SpRIT graphical interface uses tkinter, which ships with python but is not pre-installed on linux machines. Use "apt-get install python-tk" or "apt-get install python3-tk" to install tkinter. You may need to use the sudo command at the start of those commands.')
+
+        def on_gui_closing():
+            plt.close('all')
+            gui_root.quit()
+            gui_root.destroy()
+
+        if sys.platform == 'linux':
+            if not pathlib.Path("/usr/share/doc/python3-tk").exists():
+                warnings.warn('The SpRIT graphical interface uses tkinter, which ships with python but is not pre-installed on linux machines. Use "apt-get install python-tk" or "apt-get install python3-tk" to install tkinter. 
You may need to use the sudo command at the start of those commands.') + + gui_root = tk.Tk() + try: + try: + icon_path = pathlib.Path(pkg_resources.resource_filename(__name__, 'resources/icon/sprit_icon_alpha.ico')) + gui_root.iconbitmap(icon_path) + except: + icon_path = pathlib.Path(pkg_resources.resource_filename(__name__, 'resources/icon/sprit_icon.png')) + gui_root.iconphoto(False, tk.PhotoImage(file=icon_path.as_posix())) + except Exception as e: + print("ICON NOT LOADED, still opening GUI") + + gui_root.resizable(True, True) + spritApp = SPRIT_App(master=gui_root) # Open the app with a tk.Tk root + + gui_root.protocol("WM_DELETE_WINDOW", on_gui_closing) + gui_root.mainloop() # Run the main loop + elif kind.lower() in widgetList: + try: + sprit_jupyter_UI.create_jupyter_ui() + except Exception as e: + print(e)
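+
+# Usage sketch (illustrative only): opening the different interfaces.
+#
+#     gui()                 # defaults to kind='browser' (Streamlit-based web interface)
+#     gui(kind='windowed')  # tkinter windowed interface
+#     gui(kind='widget')    # jupyter widget interface (run inside a notebook)
+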
+
+
+
+# FUNCTIONS AND METHODS
+# The run function to rule them all (runs everything needed for simple HVSR processing)
+
+[docs]
+def run(input_data, source='file', azimuth_calculation=False, noise_removal=False, outlier_curves_removal=False, verbose=False, **kwargs):
+    """sprit.run() is the main function that allows you to do all your HVSR processing in one simple step (sprit.run() is how you would call it in your code, but it may also be called using sprit.sprit_hvsr.run())
+
+    The input_data parameter of sprit.run() is the only required parameter. This can be either a single file, a list of files (one for each component, for example), a directory (in which case, all obspy-readable files will be added to an HVSRBatch instance), a Raspberry Shake raw data directory, or sample data.
+
+    Notes
+    -----
+    The sprit.run() function calls the following functions. This is the recommended order/set of functions to run to process HVSR using SpRIT. See the API documentation for these functions for more information:
+    - input_params(): The input_data parameter of input_params() is the only required variable, though others may also need to be set for your data to process correctly.
+    - fetch_data(): the source parameter of fetch_data() is the only explicit variable in the sprit.run() function aside from input_data, verbose, and the boolean processing flags. Everything else gets delivered to the correct function via the kwargs dictionary
+    - remove_noise(): by default, the kind of noise removal is remove_method='auto'. See the remove_noise() documentation for more information. If remove_method is set to anything other than one of the explicit options in remove_noise, noise removal will not be carried out.
+    - generate_ppsds(): generates ppsds for each component, which will be combined/used later. Any parameter of obspy.signal.spectral_estimation.PPSD() may also be read into this function.
+    - remove_outlier_curves(): removes any outlier ppsd curves so that the data quality for when curves are combined will be enhanced. See the remove_outlier_curves() documentation for more information.
+    - process_hvsr(): this is the main function processing the hvsr curve and statistics. See the process_hvsr() documentation for more details. The hvsr_band parameter sets the frequency spectrum over which these calculations occur.
+    - check_peaks(): this is the main function that will find and 'score' peaks to get a best peak. The parameter peak_freq_range can be set to limit the frequencies within which peaks are checked and scored.
+    - get_report(): this is the main function that will print, plot, and/or save the results of the data. See the get_report() API documentation for more information.
+    - export_data(): this function exports the final data output as a pickle file (by default, this pickle object has a .hvsr extension). This can be used to read data back into SpRIT without having to reprocess data.
+
+    Parameters
+    ----------
+    input_data : str or filepath object that can be read by obspy
+        Filepath to data to be processed. This may be a file or directory, depending on what kind of data is being processed (this can be specified with the source parameter).
+        For sample data, the following can be specified as the input_data parameter:
+            - Any integer 1-10 (inclusive), as either an integer or a string (e.g., input_data="1" or input_data=1 will work)
+            - The word "sample" before any integer (e.g., input_data="sample1")
+            - The word "sample" will default to "sample1" if source='file'.
+            - If source='batch', input_data should be input_data='sample' or input_data='batch'. In this case, it will read and process all the sample files using the HVSRBatch class. Set verbose=True to see all the information in the sample batch csv file.
+    source : str, optional
+        How input_data should be interpreted when reading in the data (e.g., 'file', 'raw', or 'batch'); passed to fetch_data(), by default 'file'
+    azimuth_calculation : bool, optional
+        Whether to perform azimuthal analysis, by default False.
+    noise_removal : bool, default=False
+        Whether to remove noise (before processing PPSDs)
+    outlier_curves_removal : bool, default=False
+        Whether to remove outlier curves from HVSR time windows
+    show_plot : bool, default=True
+        Whether to show plots. This does not affect whether the plots are created (and then inserted as an attribute of HVSRData), only whether they are shown.
+    verbose : bool, optional
+        Whether to print status and results to the terminal, by default False
+    **kwargs
+        Keyword arguments for the functions listed above. The keyword arguments are unique, so they will get parsed out and passed into the appropriate function.
+
+    Returns
+    -------
+    hvsr_results : sprit.HVSRData or sprit.HVSRBatch object
+        If a single file/data point is being processed, an HVSRData object will be returned. Otherwise, it will be an HVSRBatch object. See their documentation for more information.
+
+    Raises
+    ------
+    RuntimeError
+        If the input parameters are not read correctly. This is raised if the input_params() function fails. This raises an error since no other data processing or reading steps will be able to be carried out correctly.
+    RuntimeError
+        If the data is not read/fetched correctly using fetch_data(), an error will be raised. This is raised if the fetch_data() function fails. This raises an error since no other data processing steps will be able to be carried out correctly.
+    RuntimeError
+        If the data being processed is a single file, an error will be raised if generate_ppsds() does not work correctly. No errors are raised for remove_noise() errors (since that is an optional step) or the process_hvsr() step (since that is the last processing step).
+ """ + + orig_args = locals().copy() # Get the initial arguments + + if 'hvsr_band' not in kwargs.keys(): + kwargs['hvsr_band'] = inspect.signature(input_params).parameters['hvsr_band'].default + if 'peak_freq_range' not in kwargs.keys(): + kwargs['peak_freq_range'] = inspect.signature(input_params).parameters['peak_freq_range'].default + if 'processing_parameters' not in kwargs.keys(): + kwargs['processing_parameters'] = {} + + # Separate out input_params and fetch_data processes based on whether batch has been specified + batchlist = ['batch', 'bach', 'bath', 'b'] + if str(source).lower() in batchlist and str('input_data').lower() not in SAMPLE_LIST: + try: + batch_data_read_kwargs = {k: v for k, v in kwargs.items() if k in tuple(inspect.signature(batch_data_read).parameters.keys())} + hvsrDataIN = batch_data_read(batch_data=input_data, verbose=verbose, **batch_data_read_kwargs) + except Exception as e: + raise RuntimeError(f'Batch data read in was not successful:\n{e}') + else: + # Get the input parameters + try: + input_params_kwargs = {k: v for k, v in kwargs.items() if k in tuple(inspect.signature(input_params).parameters.keys())} + params = input_params(input_data=input_data, verbose=verbose, **input_params_kwargs) + except Exception as e: + if hasattr(e, 'message'): + errMsg = e.message + else: + errMsg = e + + print(f"ERROR during input_params(): {errMsg}") + # Even if batch, this is reading in data for all sites so we want to raise error, not just warn + raise RuntimeError('Input parameters not read correctly, see sprit.input_params() function and parameters') + # If input_params fails, initialize params as an HVSRDATA + #params = {'ProcessingStatus':{'InputParamsStatus':False, 'OverallStatus':False}} + #params.update(input_params_kwargs) + #params = sprit_utils.make_it_classy(params) + + # Fetch Data + try: + fetch_data_kwargs = {k: v for k, v in kwargs.items() if k in tuple(inspect.signature(fetch_data).parameters.keys())} + hvsrDataIN = fetch_data(params=params, source=source, verbose=verbose, **fetch_data_kwargs) + except Exception as e: + # Even if batch, this is reading in data for all sites so we want to raise error, not just warn + if hasattr(e, 'message'): + errMsg = e.message + else: + errMsg = e + + print(f"ERROR during fetch_data(): {errMsg}") + raise RuntimeError('Data not read correctly, see sprit.fetch_data() function and parameters for more details.') + + # BREAK OUT FOR BATCH PROCESSING + if isinstance(hvsrDataIN, HVSRBatch): + + # Create dictionary that will be used to create HVSRBatch object + hvsrBatchDict = {} + + # Loop through each site and run sprit.run() for each HVSRData object + for site_name, site_data in hvsrDataIN.items(): + run_kwargs = {}#orig_args.copy() # Make a copy so we don't accidentally overwrite + print(f'\n\n**PROCESSING DATA FOR SITE {site_name.upper()}**\n') + run_kwargs['input_data'] = site_data + + # Update run kwargs + # First, get processing_parameters per site + for funname, fundict in site_data['processing_parameters'].items(): + for funk, funv in fundict.items(): + run_kwargs[funk] = funv + + # Overwrite per-site processing parameters with params passed to sprit.run() as kwargs + for paramname, paramval in kwargs.items(): + if paramname != 'source': # Don't update source for batch data + run_kwargs[paramname] = paramval + + dont_update_these_args = ['input_data', 'source', 'kwargs'] + + # Overwrite per-site processing parameters with sprit.run() + run_args = orig_args.copy() + for k, v in run_args.items(): + if k not in 
+                    if v != inspect.signature(run).parameters[k].default:
+                        run_kwargs[k] = v
+
+            try:
+                hvsrBatchDict[site_name] = run(**run_kwargs)
+            except Exception as e:
+                sprit_utils._get_error_from_exception(e)
+
+                hvsrBatchDict[site_name] = site_data
+                hvsrBatchDict[site_name]['ProcessingStatus']['PPSDStatus'] = False
+                hvsrBatchDict[site_name]['ProcessingStatus']['OverallStatus'] = False
+
+        return HVSRBatch(hvsrBatchDict)
+
+    # Calculate azimuths
+    hvsr_az = hvsrDataIN
+    azimuth_kwargs = {k: v for k, v in kwargs.items() if k in tuple(inspect.signature(calculate_azimuth).parameters.keys())}
+    if 'horizontal_method' in kwargs.keys() and (str(kwargs['horizontal_method']) == '8' or 'single' in str(kwargs['horizontal_method']).lower()):
+        azimuth_calculation = True
+        azimuth_kwargs['azimuth_type'] = kwargs['azimuth_type'] = 'single'
+
+        if 'azimuth_angle' not in kwargs.keys():
+            azimuth_kwargs['azimuth_angle'] = kwargs['azimuth_angle'] = 45
+    if len(azimuth_kwargs.keys()) > 0 or azimuth_calculation is True:
+        try:
+            hvsr_az = calculate_azimuth(hvsrDataIN, verbose=verbose, **azimuth_kwargs)
+        except Exception as e:
+            if hasattr(e, 'message'):
+                errMsg = e.message
+            else:
+                errMsg = e
+
+            print(f"Error during calculate_azimuth() for {hvsr_az.site}: \n{errMsg}")
+
+            if isinstance(hvsr_az, HVSRBatch):
+                for site_name in hvsr_az.keys():
+                    hvsr_az[site_name]['ProcessingStatus']['Azimuth'] = False
+            else:
+                hvsr_az['ProcessingStatus']['Azimuth'] = False
+
+    # Remove Noise
+    data_noiseRemoved = hvsr_az
+    try:
+        remove_noise_kwargs = {k: v for k, v in kwargs.items() if k in tuple(inspect.signature(remove_noise).parameters.keys())}
+        if noise_removal or remove_noise_kwargs != {}:
+            remove_noise_kwargs = {k: v for k, v in kwargs.items() if k in tuple(inspect.signature(remove_noise).parameters.keys())}
+            try:
+                data_noiseRemoved = remove_noise(hvsr_data=data_noiseRemoved, verbose=verbose, **remove_noise_kwargs)
+            except Exception as e:
+                if hasattr(e, 'message'):
+                    errMsg = e.message
+                else:
+                    errMsg = e
+
+                print(f"Error with remove_noise for site {data_noiseRemoved.site}: {errMsg}")
+
+                # Mark that remove_noise failed
+                # Reformat data so HVSRData and HVSRBatch data both work here
+                if isinstance(data_noiseRemoved, HVSRData):
+                    data_noiseRemoved = {data_noiseRemoved.site: data_noiseRemoved}
+
+                for site_name in data_noiseRemoved.keys():
+                    data_noiseRemoved[site_name]['ProcessingStatus']['RemoveNoiseStatus'] = False
+                    # Since noise removal is not required for data processing, check others first
+                    if data_noiseRemoved[site_name]['ProcessingStatus']['OverallStatus']:
+                        data_noiseRemoved[site_name]['ProcessingStatus']['OverallStatus'] = True
+                    else:
+                        data_noiseRemoved[site_name]['ProcessingStatus']['OverallStatus'] = False
+
+                    # If it wasn't originally HVSRBatch, make it HVSRData object again
+                    if not data_noiseRemoved[site_name]['batch']:
+                        data_noiseRemoved = data_noiseRemoved[site_name]
+        else:
+            if isinstance(data_noiseRemoved, HVSRData):
+                data_noiseRemoved = {data_noiseRemoved.site: data_noiseRemoved}
+
+            for site_name in data_noiseRemoved.keys():  # This should work more or less the same for batch and regular data now
+                data_noiseRemoved[site_name]['stream_edited'] = data_noiseRemoved[site_name]['stream']
+
+                data_noiseRemoved[site_name]['ProcessingStatus']['RemoveNoiseStatus'] = None
+
+                # If it wasn't originally HVSRBatch, make it HVSRData object again
+                #if not data_noiseRemoved[site_name]['batch']:
+                data_noiseRemoved = data_noiseRemoved[site_name]
+    except Exception as e:
+        if (source == 'file' or source == 'raw'):
+            if hasattr(e, 'message'):
+                errMsg = e.message
+            else:
+                errMsg = e
+            if not ('batch' in data_noiseRemoved.keys() and data_noiseRemoved['batch']):
+                raise RuntimeError(f"remove_noise() error: {errMsg}")
+
+    # Generate PPSDs
+    ppsd_data = data_noiseRemoved
+    try:
+        generate_ppsds_kwargs = {k: v for k, v in kwargs.items() if k in tuple(inspect.signature(generate_ppsds).parameters.keys())}
+        PPSDkwargs = {k: v for k, v in kwargs.items() if k in tuple(inspect.signature(PPSD).parameters.keys())}
+        generate_ppsds_kwargs.update(PPSDkwargs)
+        ppsd_data = generate_ppsds(hvsr_data=ppsd_data, verbose=verbose, **generate_ppsds_kwargs)
+    except Exception as e:
+        if hasattr(e, 'message'):
+            errMsg = e.message
+        else:
+            errMsg = e
+
+        print(f"Error during generate_ppsds() for {site_name}: \n{errMsg}")
+        if source == 'file' or source == 'raw':
+            raise RuntimeError(f"generate_ppsds() error: {errMsg}")
+
+        # Reformat data so HVSRData and HVSRBatch data both work here
+        if isinstance(ppsd_data, HVSRData):
+            ppsd_data = {ppsd_data['site']: ppsd_data}
+
+        for site_name in ppsd_data.keys():  # This should work more or less the same for batch and regular data now
+            ppsd_data[site_name]['ProcessingStatus']['PPSDStatus'] = False
+            ppsd_data[site_name]['ProcessingStatus']['OverallStatus'] = False
+
+            # If it wasn't originally HVSRBatch, make it HVSRData object again
+            if not ppsd_data[site_name]['batch']:
+                ppsd_data = ppsd_data[site_name]
+
+    # Remove Outlier Curves
+    data_curvesRemoved = ppsd_data
+    try:
+        remove_outlier_curve_kwargs = {k: v for k, v in kwargs.items() if k in tuple(inspect.signature(remove_outlier_curves).parameters.keys())}
+
+        # Check whether it is indicated to remove outlier curves
+        outlier_curve_keys_used = True
+        if remove_outlier_curve_kwargs == {} or list(remove_outlier_curve_kwargs.keys()) == ['show_plot']:
+            outlier_curve_keys_used = False
+        if outlier_curves_removal or outlier_curve_keys_used:
+            data_curvesRemoved = remove_outlier_curves(hvsr_data=data_curvesRemoved, verbose=verbose, **remove_outlier_curve_kwargs)
+    except Exception as e:
+        traceback.print_exception(sys.exc_info()[1])
+        exc_type, exc_obj, tb = sys.exc_info()
+        f = tb.tb_frame
+        lineno = tb.tb_lineno
+        filename = f.f_code.co_filename
+        errLineNo = str(traceback.extract_tb(sys.exc_info()[2])[-1].lineno)
+        error_category = type(e).__name__.title().replace('error', 'Error')
+        error_message = f"{e} ({errLineNo})"
+        print(f"{error_category} ({errLineNo}): {error_message}")
+        print(lineno, filename, f)
+
+        # Reformat data so HVSRData and HVSRBatch data both work here
+        if isinstance(data_curvesRemoved, HVSRData):
+            data_curvesRemoved_interim = {data_curvesRemoved['site']: data_curvesRemoved}
+        else:
+            data_curvesRemoved_interim = data_curvesRemoved
+
+        for site_name in data_curvesRemoved_interim.keys():  # This should work more or less the same for batch and regular data now
+            data_curvesRemoved_interim[site_name]['ProcessingStatus']['RemoveOutlierCurvesStatus'] = False
+            data_curvesRemoved_interim[site_name]['ProcessingStatus']['OverallStatus'] = False
+
+            # If it wasn't originally HVSRBatch, make it HVSRData object again
+            if not data_curvesRemoved_interim[site_name]['batch']:
+                data_curvesRemoved_interim = data_curvesRemoved_interim[site_name]
+        data_curvesRemoved = data_curvesRemoved_interim
+
+    # Process HVSR Curves
+    hvsr_results = data_curvesRemoved
+    try:
+        process_hvsr_kwargs = {k: v for k, v in kwargs.items() if k in tuple(inspect.signature(process_hvsr).parameters.keys())}
+        hvsr_results = process_hvsr(hvsr_data=data_curvesRemoved, verbose=verbose, **process_hvsr_kwargs)
+    except Exception as e:
+        traceback.print_exception(sys.exc_info()[1])
+        exc_type, exc_obj, tb = sys.exc_info()
+        f = tb.tb_frame
+        lineno = tb.tb_lineno
+        filename = f.f_code.co_filename
+        errLineNo = str(traceback.extract_tb(sys.exc_info()[2])[-1].lineno)
+        error_category = type(e).__name__.title().replace('error', 'Error')
+        error_message = f"{e} ({errLineNo})"
+        print(f"{error_category} ({errLineNo}): {error_message}")
+        print(lineno, filename, f)
+
+        if isinstance(hvsr_results, HVSRData):
+            hvsr_results = {hvsr_results['site']: hvsr_results}
+
+        for site_name in hvsr_results.keys():  # This should work more or less the same for batch and regular data now
+
+            hvsr_results[site_name]['ProcessingStatus']['HVStatus'] = False
+            hvsr_results[site_name]['ProcessingStatus']['OverallStatus'] = False
+
+            # If it wasn't originally HVSRBatch, make it HVSRData object again
+            if not hvsr_results[site_name]['batch']:
+                hvsr_results = hvsr_results[site_name]
+
+    # Final post-processing/reporting
+    # Check peaks
+    check_peaks_kwargs = {k: v for k, v in kwargs.items() if k in tuple(inspect.signature(check_peaks).parameters.keys())}
+    hvsr_results = check_peaks(hvsr_data=hvsr_results, verbose=verbose, **check_peaks_kwargs)
+
+    get_report_kwargs = {k: v for k, v in kwargs.items() if k in tuple(inspect.signature(get_report).parameters.keys())}
+    # Add 'az' as a default plot if the following conditions are met
+    # first check if report_formats is specified, if not, add default value
+    if 'report_formats' not in get_report_kwargs.keys():
+        get_report_kwargs['report_formats'] = inspect.signature(get_report).parameters['report_formats'].default
+
+    # Now, check if plot is specified, then if plot_type is specified, then add 'az' if stream has azimuths
+    if 'plot' in get_report_kwargs['report_formats']:
+        plot_hvsr_kwargs = {k: v for k, v in kwargs.items() if k in tuple(inspect.signature(plot_hvsr).parameters.keys())}
+        get_report_kwargs.update(plot_hvsr_kwargs)
+        usingDefault = True
+        if 'plot_type' not in get_report_kwargs.keys():
+            get_report_kwargs['plot_type'] = inspect.signature(get_report).parameters['plot_type'].default
+        else:
+            usingDefault = False
+
+        # Check if az is already specified as plot output
+        azList = ['azimuth', 'az', 'a', 'radial', 'r']
+        az_requested = False
+
+        get_report_kwargs['plot_type'] = [item.lower() for item in get_report_kwargs['plot_type'].split(' ')]
+        for azStr in azList:
+            if azStr.lower() in get_report_kwargs['plot_type']:
+                az_requested = True
+                break
+        get_report_kwargs['plot_type'] = ' '.join(get_report_kwargs['plot_type'])
+
+        if isinstance(hvsr_results, HVSRData):
+            hvsr_results_interim = {hvsr_results['site']: hvsr_results}
+        else:
+            hvsr_results_interim = hvsr_results
+
+        for site_name in hvsr_results_interim.keys():  # This should work more or less the same for batch and regular data now
+            # Check if data has azimuth data
+            hasAz = False
+            if 'stream' in hvsr_results_interim[site_name].keys():
+                for tr in hvsr_results_interim[site_name]['stream']:
+                    if tr.stats.component == 'R':
+                        hasAz = True
+                        break
+
+            # Assuming all sites in batch have az if one does
+            if hasAz:
+                break
+
+            # If it wasn't originally HVSRBatch, make it HVSRData object again
+            #if not hvsr_results_interim[site_name]['batch']:
+            #    hvsr_results_interim = hvsr_results_interim[site_name]
+
+        # Add azimuth as a requested plot if azimuthal data exists but not requested in plot
+        if not az_requested and hasAz and hvsr_results.horizontal_method != 'Single Azimuth':
+            get_report_kwargs['plot_type'] = get_report_kwargs['plot_type'] + ' az'
+    get_report(hvsr_results=hvsr_results, verbose=verbose, **get_report_kwargs)
+
+    if verbose:
+        if 'report_formats' in get_report_kwargs.keys():
+            if type(get_report_kwargs['report_formats']) is str:
+                report_formats = get_report_kwargs['report_formats'].lower()
+            elif isinstance(get_report_kwargs['report_formats'], (tuple, list)):
+                for i, rf in enumerate(get_report_kwargs['report_formats']):
+                    get_report_kwargs['report_formats'][i] = rf.lower()
+
+            # if report_formats is 'print', we would have already printed it in previous step
+            if get_report_kwargs['report_formats'] == 'print' or 'print' in get_report_kwargs['report_formats'] or isinstance(hvsr_results, HVSRBatch):
+                # We do not need to print another report if already printed to terminal
+                pass
+            else:
+                # We will just change the report_formats kwarg to print, since we already got the originally intended report format above,
+                # now need to print for verbose output
+                get_report_kwargs['report_formats'] = 'print'
+                get_report(hvsr_results=hvsr_results, **get_report_kwargs)
+
+            if get_report_kwargs['report_formats'] == 'plot' or 'plot' in get_report_kwargs['report_formats']:
+                # We do not need to plot another report if already plotted
+                pass
+            else:
+                # hvplot_kwargs = {k: v for k, v in kwargs.items() if k in plot_hvsr.__code__.co_varnames}
+                # hvsr_results['HV_Plot'] = plot_hvsr(hvsr_results, return_fig=True, show_plot=False, close_figs=True)
+                pass
+        else:
+            pass
+
+    # Export processed data if hvsr_export_path is specified (as pickle currently, default .hvsr extension)
+    if 'hvsr_export_path' in kwargs.keys():
+        if kwargs['hvsr_export_path'] is None:
+            pass
+        else:
+            if 'ext' in kwargs.keys():
+                ext = kwargs['ext']
+            else:
+                ext = 'hvsr'
+            export_data(hvsr_data=hvsr_results, hvsr_export_path=kwargs['hvsr_export_path'], ext=ext, verbose=verbose)
+    if 'show_plot' in kwargs:
+        if not kwargs['show_plot']:
+            plt.close()
+
+    return hvsr_results
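+
+# Usage sketch (illustrative only): minimal end-to-end processing with sprit.run().
+# The miniseed filename and export path below are hypothetical.
+#
+#     hvsr_results = run('sample1', verbose=True)    # process a single sample site
+#     batch_results = run('sample', source='batch')  # process the sample batch data
+#     results = run('site_recording.mseed',
+#                   noise_removal=True,
+#                   outlier_curves_removal=True,
+#                   hvsr_export_path='site_results.hvsr')
+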
+ + + +# Read data as batch +
+[docs]
+def batch_data_read(batch_data, batch_type='table', param_col=None, batch_params=None, verbose=False, **readcsv_getMeta_fetch_kwargs):
+    """Function to read in data as a batch of multiple data files. This is best used through sprit.fetch_data(*args, source='batch', **other_kwargs).
+
+    Parameters
+    ----------
+    batch_data : filepath or list
+        Input data information for how to read in data as batch. Can be filepath or list of filepaths/stream objects.
+        If filepath, should point to .csv (or similar that can be read by pandas.read_csv()) with batch data information.
+    batch_type : str, optional
+        Type of batch read, only 'table' and 'filelist' accepted.
+        If 'table', will read data from a file read in using pandas.read_csv(), by default 'table'
+    param_col : None or str, optional
+        Name of parameter column from batch information file. Only used if batch_type='table' and a single parameter column is used, rather than one column per parameter (for a single parameter column, parameters are formatted with = between keys/values and , between item pairs), by default None
+    batch_params : list, dict, or None, default = None
+        Parameters to be used if batch_type='filelist'. If it is a list, needs to be the same length as batch_data. If it is a dict, will be applied to all files in batch_data and will be combined with extra keyword arguments caught by **readcsv_getMeta_fetch_kwargs.
+    verbose : bool, optional
+        Whether to print information to terminal during batch read, by default False
+    **readcsv_getMeta_fetch_kwargs
+        Keyword arguments that will be read into pandas.read_csv(), sprit.input_params, sprit.get_metadata(), and/or sprit.fetch_data()
+
+    Returns
+    -------
+    hvsrBatch
+        HVSRBatch object with each item representing a different HVSRData object
+
+    Raises
+    ------
+    IndexError
+        If param_col is specified but is not a column in the batch information file
+    """
+
+    if verbose:
+        print(f'Processing batch data from {batch_type}:')
+        print(f" Batch data source: {batch_data}")
+
+    # First figure out which parameters go with which function
+    input_params_params = inspect.signature(input_params).parameters
+    get_metadata_params = inspect.signature(get_metadata).parameters
+    fetch_data_params = inspect.signature(fetch_data).parameters
+    calculate_azimuth_params = inspect.signature(calculate_azimuth).parameters
+    remove_noise_params = inspect.signature(remove_noise).parameters
+    generate_ppsds_params = inspect.signature(generate_ppsds).parameters
+    remove_outlier_curves_params = inspect.signature(remove_outlier_curves).parameters
+    process_hvsr_params = inspect.signature(process_hvsr).parameters
+    check_peaks_params = inspect.signature(check_peaks).parameters
+    get_report_params = inspect.signature(get_report).parameters
+
+    dict_of_params = {'input_params': input_params_params,
+                      'get_metadata': get_metadata_params,
+                      'fetch_data_params': fetch_data_params,
+                      'calculate_azimuth_params': calculate_azimuth_params,
+                      'remove_noise_params': remove_noise_params,
+                      'generate_ppsds_params': generate_ppsds_params,
+                      'remove_outlier_curves_params': remove_outlier_curves_params,
+                      'process_hvsr_params': process_hvsr_params,
+                      'check_peaks_params': check_peaks_params,
+                      'get_report_params': get_report_params}
+
+    # Get a list of all functions (for which parameters are used) in sprit.run()
+    run_functions_list = [input_params, fetch_data,
+                          get_metadata, calculate_azimuth,
+                          remove_noise, generate_ppsds, remove_outlier_curves,
+                          process_hvsr, check_peaks,
+                          get_report, export_data]
+
+    # Get default values of all functions in a dict
+    default_dict = {}
+    for i, fun
in enumerate(run_functions_list): + for param_name, param_info in inspect.signature(fun).parameters.items(): + if param_info.default is not inspect._empty: + default_dict[param_name] = param_info.default + + if batch_type == 'sample': + sample_data = True + batch_type = 'table' + else: + sample_data = False + + # Dictionary to store the stream objects + stream_dict = {} + data_dict = {} + if batch_type == 'table': + if isinstance(batch_data, pd.DataFrame): + dataReadInfoDF = batch_data + elif isinstance(batch_data, dict): + # For params input + pass + else: # Read csv + read_csv_kwargs = {k: v for k, v in locals()['readcsv_getMeta_fetch_kwargs'].items() if k in inspect.signature(pd.read_csv).parameters} + dataReadInfoDF = pd.read_csv(batch_data, **read_csv_kwargs) + if 'input_data' in dataReadInfoDF.columns: + filelist = list(dataReadInfoDF['input_data']) + + # If this is sample data, we need to create absolute paths to the filepaths + if sample_data: + sample_data_dir = pathlib.Path(pkg_resources.resource_filename(__name__, 'resources/sample_data/')) + for index, row in dataReadInfoDF.iterrows(): + dataReadInfoDF.loc[index, 'input_data'] = sample_data_dir.joinpath(row.loc['input_data']) + + # Generate site names if they don't exist already + if 'site' not in dataReadInfoDF.columns: + siterows = [] + filldigs = len(str(dataReadInfoDF.shape[0])) # Number of digits in df shape + for i, row in dataReadInfoDF.iterrows(): + siterows.append(f'HVSRSite_{str(i).zfill(filldigs)}') + dataReadInfoDF['site'] = siterows + + # Print information about batch read, as specified + print(f" {dataReadInfoDF.shape[0]} sites found: {list(dataReadInfoDF['site'])}") + if verbose: + maxLength = 25 + maxColWidth = 12 + if dataReadInfoDF.shape[0] > maxLength: + print(f'\t Showing information for first {maxLength} files only:') + print() + + # Print nicely formatted df + # Print column names + print(' ', end='') + for col in dataReadInfoDF.columns: + print(str(col)[:maxColWidth].ljust(maxColWidth), end=' ') + + print('\n', end='') + # Print separator + tableLen = (maxColWidth+2)*len(dataReadInfoDF.columns) + for r in range(tableLen): + print('-', end='') + print() + + #Print columns/rows + for index, row in dataReadInfoDF.iterrows(): + print(' ', end='') + for col in row: + if len(str(col)) > maxColWidth: + print((str(col)[:maxColWidth-3]+'...').ljust(maxColWidth), end=' ') + else: + print(str(col)[:maxColWidth].ljust(maxColWidth), end=' ') + print() + if dataReadInfoDF.shape[0] > maxLength: + endline = f'\t...{dataReadInfoDF.shape[0]-maxLength} more rows in file.\n' + else: + endline = '\n' + print(endline) + + print('Fetching the following files:') + + param_dict_list = [] + verboseStatement = [] + if param_col is None: # Not a single parameter column, each col=parameter + for row_ind in range(dataReadInfoDF.shape[0]): + param_dict = {} + verboseStatement.append([]) + for col in dataReadInfoDF.columns: + for fun in run_functions_list: + if col in inspect.signature(fun).parameters: + currParam = dataReadInfoDF.loc[row_ind, col] + if pd.isna(currParam) or currParam == 'nan': + if col in default_dict.keys(): + param_dict[col] = default_dict[col] # Get default value + if verbose: + if type(default_dict[col]) is str: + verboseStatement[row_ind].append("\t\t'{}' parameter not specified in batch file. Using {}='{}'".format(col, col, default_dict[col])) + else: + verboseStatement[row_ind].append("\t\t'{}' parameter not specified in batch file. 
Using {}={}".format(col, col, default_dict[col])) + else: + param_dict[col] = None + else: + param_dict[col] = dataReadInfoDF.loc[row_ind, col] + param_dict_list.append(param_dict) + else: + if param_col not in dataReadInfoDF.columns: + raise IndexError('{} is not a column in {} (columns are: {})'.format(param_col, batch_data, dataReadInfoDF.columns)) + for row in dataReadInfoDF[param_col]: + param_dict = {} + splitRow = str(row).split(',') + for item in splitRow: + param_dict[item.split('=')[0]] = item.split('=')[1] + param_dict_list.append(param_dict) + + elif batch_type == 'filelist': + if not isinstance(batch_data, (list, tuple)): + raise RuntimeError(f"If batch_type is specified as 'filelist' or 'list', batch_data must be list or tuple, not {type(batch_data)}.") + + # Update formatting of batch_params for rest of processing + if batch_params is None: + batch_params = [{}] * len(batch_data) + + if isinstance(batch_params, list): + if len(batch_params) != len(batch_data): + raise RuntimeError('If batch_params is list, it must be the same length as batch_data. len(batch_params)={} != len(batch_data)={}'.format(len(batch_params), len(batch_data))) + param_dict_list = batch_params + elif isinstance(batch_params, dict): + batch_params.update(readcsv_getMeta_fetch_kwargs) + param_dict_list = [] + for i in range(len(batch_data)): + param_dict_list.append(batch_params) + + # Read and process each MiniSEED file + for i, file in enumerate(batch_data): + param_dict_list[i]['input_data'] = file + + hvsr_metaDict = {} + zfillDigs = len(str(len(param_dict_list))) # Get number of digits of length of param_dict_list + i = 0 + for i, param_dict in enumerate(param_dict_list): + # Read the data file into a Stream object + input_params_kwargs = {k: v for k, v in locals()['readcsv_getMeta_fetch_kwargs'].items() if k in inspect.signature(input_params).parameters} + input_params_kwargs2 = {k: v for k, v in param_dict.items() if k in inspect.signature(input_params).parameters} + input_params_kwargs.update(input_params_kwargs2) + + # Run input_params + try: + ipverboseString = '\tinput_params: <No parameters specified>, ' + for arg, value in input_params_kwargs.items(): + ipverboseString = ipverboseString.replace('<No parameters specified>, ', '') + ipverboseString += f"{arg}={value}, " + ipverboseString = ipverboseString[:-2] + ipverboseString = (ipverboseString[:96] + '...') if len(ipverboseString) > 99 else ipverboseString + + params = input_params(**input_params_kwargs) + except Exception as e: + params['ProcessingStatus']['InputParamsStatus'] = False + params['ProcessingStatus']['OverallStatus'] = False + verboseStatement.append(f"\t{e}") + + fetch_data_kwargs = {k: v for k, v in locals()['readcsv_getMeta_fetch_kwargs'].items() if k in inspect.signature(fetch_data).parameters} + fetch_data_kwargs2 = {k: v for k, v in param_dict.items() if k in inspect.signature(fetch_data).parameters} + fetch_data_kwargs.update(fetch_data_kwargs2) + + try: + fdverboseString = '\tfetch_data: <No parameters specified>, ' + for arg, value in fetch_data_kwargs.items(): + fdverboseString = fdverboseString.replace('<No parameters specified>, ', '') + fdverboseString += f"{arg}={value}, " + fdverboseString = fdverboseString[:-2] + fdverboseString = (fdverboseString[:96] + '...') if len(fdverboseString) > 99 else fdverboseString + + hvsrData = fetch_data(params=params, **fetch_data_kwargs) + except Exception as e: + hvsrData['ProcessingStatus']['FetchDataStatus'] = False + hvsrData['ProcessingStatus']['OverallStatus'] = False + 
verboseStatement.append(f"\t{e}") + + if verbose and hvsrData['ProcessingStatus']['OverallStatus']: + print(f" {hvsrData['site']}") + print(ipverboseString) + print(fdverboseString) + if verboseStatement != []: + for item in verboseStatement[i]: + print(item) + elif verbose and not hvsrData['ProcessingStatus']['OverallStatus']: + if 'site' in param_dict.keys(): + sitename = param_dict['site'] + else: + sitename = 'UNSPECIFIED_SITE' + + print(f" {sitename}") + print(ipverboseString) + print(fdverboseString) + if verboseStatement != []: + for item in verboseStatement[i]: + print(item) + print(f" *{sitename} not read correctly. Processing will not be carried out.") + + hvsrData['batch'] = True + + # This may be redundant + if hvsrData['site'] == default_dict['site']: # If site was not designated + hvsrData['site'] = "{}_{}".format(hvsrData['site'], str(i).zfill(zfillDigs)) + i += 1 + + # Get processing parameters for other functions in sprit.run() besides input_params and fetch_data + if 'processing_parameters' in hvsrData.keys(): + processing_parameters = hvsrData['processing_parameters'].copy() + else: + processing_parameters = {} #"input_params": input_params_kwargs, "fetch_data": fetch_data_kwargs} + + for fun in run_functions_list: + specified_params = {k: v for k, v in param_dict.items() if k in inspect.signature(fun).parameters} + processing_parameters[fun.__name__] = specified_params + + hvsrData['processing_parameters'] = processing_parameters + if 'source' not in hvsrData['processing_parameters']['fetch_data'].keys(): + hvsrData['processing_parameters']['fetch_data']['source'] = 'file' + + hvsr_metaDict[hvsrData['site']] = hvsrData + + hvsrBatch = HVSRBatch(hvsr_metaDict) + + print() + print('Finished reading input data in preparation of batch processing') + return hvsrBatch
+ + + +# Function to generate azimuthal readings from the horizontal components +
+[docs]
+def calculate_azimuth(hvsr_data, azimuth_angle=30, azimuth_type='multiple', azimuth_unit='degrees', show_az_plot=False, verbose=False, **plot_azimuth_kwargs):
+    """Function to calculate azimuthal horizontal component at specified angle(s). Adds each new horizontal component as a radial component to the obspy.Stream object at hvsr_data['stream']
+
+    Parameters
+    ----------
+    hvsr_data : HVSRData
+        Input HVSR data
+    azimuth_angle : int, default=30
+        If `azimuth_type='multiple'`, this is the angular step (in unit `azimuth_unit`) of each of the azimuthal measurements.
+        If `azimuth_type='single'`, this is the angle (in unit `azimuth_unit`) of the single calculated azimuthal measurement. By default 30.
+    azimuth_type : str, default='multiple'
+        What type of azimuthal measurement to make, by default 'multiple'.
+        If 'multiple' (or {'multi', 'mult', 'm'}), will take a measurement at each angular step of azimuth_angle of unit azimuth_unit.
+        If 'single' (or {'sing', 's'}), will take a single azimuthal measurement at angle specified in azimuth_angle.
+    azimuth_unit : str, default='degrees'
+        Angular unit used to specify `azimuth_angle` parameter. By default 'degrees'.
+        If 'degrees' (or {'deg', 'd'}), will use degrees.
+        If 'radians' (or {'rad', 'r'}), will use radians.
+    show_az_plot : bool, default=False
+        Whether to show azimuthal plot, by default False.
+    verbose : bool, default=False
+        Whether to print terminal output, by default False
+
+    Returns
+    -------
+    HVSRData
+        Updated HVSRData object specified in hvsr_data with hvsr_data['stream'] attribute containing additional components (EHR-***),
+        with *** being zero-padded (3 digits) azimuth angle in degrees.
+    """
+    # Get input parameters
+    orig_args = locals().copy()
+    start_time = datetime.datetime.now()
+
+    # Update with processing parameters specified previously in input_params, if applicable
+    if 'processing_parameters' in hvsr_data.keys():
+        if 'calculate_azimuth' in hvsr_data['processing_parameters'].keys():
+            update_msg = []
+            for k, v in hvsr_data['processing_parameters']['calculate_azimuth'].items():
+                defaultVDict = dict(zip(inspect.getfullargspec(calculate_azimuth).args[1:],
+                                        inspect.getfullargspec(calculate_azimuth).defaults))
+                # Manual input to function overrides the imported parameter values
+                if (not isinstance(v, (HVSRData, HVSRBatch))) and (k in orig_args.keys()) and (orig_args[k] == defaultVDict[k]):
+                    update_msg.append(f'\t\t{k} = {v} (previously {orig_args[k]})')
+                    orig_args[k] = v
+
+    azimuth_angle = orig_args['azimuth_angle']
+    azimuth_unit = orig_args['azimuth_unit']
+    show_az_plot = orig_args['show_az_plot']
+    verbose = orig_args['verbose']
+
+    if (verbose and isinstance(hvsr_data, HVSRBatch)) or (verbose and not hvsr_data['batch']):
+        if isinstance(hvsr_data, HVSRData) and hvsr_data['batch']:
+            pass
+        else:
+            print('\nGenerating azimuthal data (calculate_azimuth())')
+            print('\tUsing the following parameters:')
+            for key, value in orig_args.items():
+                if key == 'hvsr_data':
+                    pass
+                else:
+                    print('\t  {}={}'.format(key, value))
+
+            if 'processing_parameters' in hvsr_data.keys() and 'calculate_azimuth' in hvsr_data['processing_parameters'].keys():
+                if update_msg != []:
+                    print()
+                    update_msg.insert(0, '\tThe following parameters were updated using the processing_parameters attribute:')
+                    for msg_line in update_msg:
+                        print(msg_line)
+                    print()
+
+    if isinstance(hvsr_data, HVSRBatch):
+        # If running batch, we'll loop through each site
+        hvsr_out = {}
+        for site_name in hvsr_data.keys():
+            args = orig_args.copy()  # Make a copy so we don't accidentally overwrite
+            args['hvsr_data'] = hvsr_data[site_name]  # Get what would normally be the "hvsr_data" variable for each site
+            if hvsr_data[site_name]['ProcessingStatus']['OverallStatus']:
+                try:
+                    hvsr_out[site_name] = __azimuth_batch(**args)  # Call another function that lets us run this function again
+                except Exception as e:
+                    hvsr_out[site_name]['ProcessingStatus']['Azimuth'] = False
+                    hvsr_out[site_name]['ProcessingStatus']['OverallStatus'] = False
+                    if verbose:
+                        print(e)
+            else:
+                hvsr_data[site_name]['ProcessingStatus']['Azimuth'] = False
+                hvsr_data[site_name]['ProcessingStatus']['OverallStatus'] = False
+                hvsr_out = hvsr_data
+
+        output = HVSRBatch(hvsr_out)
+        return output
+    elif isinstance(hvsr_data, (HVSRData, dict, obspy.Stream)):
+
+        degList = ['degrees', 'deg', 'd']
+        radList = ['radians', 'rad', 'r']
+        if azimuth_unit.lower() in degList:
+            az_angle_rad = np.deg2rad(azimuth_angle)
+            az_angle_deg = azimuth_angle
+        elif azimuth_unit.lower() in radList:
+            az_angle_rad = azimuth_angle
+            az_angle_deg = np.rad2deg(azimuth_angle)
+        else:
+            warnings.warn(f"azimuth_unit={azimuth_unit} not supported. Try 'degrees' or 'radians'. No azimuthal analysis run.")
+            return hvsr_data
+
+        # Limit to 1-180 (and "right" half of compass) (will be reflected on other half if applicable to save computation time)
+        conversion_message = ''
+        will_convert = False
+        if az_angle_deg < 0:
+            will_convert = True
+            conversion_message = conversion_message + ' converted to a positive value'
+            if az_angle_deg < -180:
+                conversion_message = conversion_message + ' between 0 and 180 degrees'
+
+        if az_angle_deg > 180:
+            will_convert = True
+            conversion_message = conversion_message + ' converted to a value between 0 and 180 degrees'
+
+        if will_convert:
+            conversion_message = f"\tThe azimuth angle specified will be{conversion_message}"
+
+            if verbose:
+                print(conversion_message, end=f': {az_angle_deg}')
+            # Convert angle to 0-180
+            az_angle_deg = az_angle_deg - (180 * (az_angle_deg // 180))
+            az_angle_rad = np.deg2rad(az_angle_deg)
+
+            if verbose:
+                print(f' degrees --> {az_angle_deg} degrees.')
+
+        multAzList = ['multiple', 'multi', 'mult', 'm']
+        singleAzList = ['single', 'sing', 's']
+        if azimuth_type.lower() in multAzList:
+            azimuth_list = list(np.arange(0, np.pi, az_angle_rad))
+            azimuth_list_deg = list(np.arange(0, 180, az_angle_deg))
+        elif azimuth_type.lower() in singleAzList:
+            azimuth_list = [az_angle_rad]
+            azimuth_list_deg = [az_angle_deg]
+        else:
+            warnings.warn(f"azimuth_type={azimuth_type} not supported. Try 'multiple' or 'single'. No azimuthal analysis run.")
No azimuthal analysis run.") + return hvsr_data + + if isinstance(hvsr_data, (HVSRData, dict)): + zComp = hvsr_data['stream'].select(component='Z').merge() + eComp = hvsr_data['stream'].select(component='E').merge() + nComp = hvsr_data['stream'].select(component='N').merge() + elif isinstance(hvsr_data, obspy.Stream): + zComp = hvsr_data.select(component='Z').merge() + eComp = hvsr_data.select(component='E').merge() + nComp = hvsr_data.select(component='N').merge() + + # Reset stats for original data too + zComp[0].stats['azimuth_deg'] = 0 + eComp[0].stats['azimuth_deg'] = 90 + nComp[0].stats['azimuth_deg'] = 0 + + zComp[0].stats['azimuth_rad'] = 0 + eComp[0].stats['azimuth_rad'] = np.pi/2 + nComp[0].stats['azimuth_rad'] = 0 + + zComp[0].stats['location'] = '000' + eComp[0].stats['location'] = '090' + nComp[0].stats['location'] = '000' + + statsDict = {} + for key, value in eComp[0].stats.items(): + statsDict[key] = value + + for i, az_rad in enumerate(azimuth_list): + az_deg = azimuth_list_deg[i] + statsDict['location'] = f"{str(round(az_deg,0)).zfill(3)}" #Change location name + statsDict['channel'] = f"EHR"#-{str(round(az_deg,0)).zfill(3)}" #Change channel name + statsDict['azimuth_deg'] = az_deg + statsDict['azimuth_rad'] = az_rad + + hasMask = [False, False] + if np.ma.is_masked(nComp[0].data): + nData = nComp[0].data.data + nMask = nComp[0].data.mask + hasMask[0] = True + else: + nData = nComp[0].data + nMask = [True] * len(nData) + + if np.ma.is_masked(eComp[0].data): + eData = eComp[0].data.data + eMask = eComp[0].data.mask + hasMask[1] = True + else: + eData = eComp[0].data + eMask = [True] * len(eData) + + # From hvsrpy: horizontal = self.ns._amp * math.cos(az_rad) + self.ew._amp*math.sin(az_rad) + if True in hasMask: + radial_comp_data = np.ma.array(np.add(nData * np.cos(az_rad), eData * np.sin(az_angle_rad)), mask=list(map(operator.and_, nMask, eMask))) + else: + radial_comp_data = np.add(nData * np.cos(az_rad), eData * np.sin(az_rad)) + + radial_trace = obspy.Trace(data=radial_comp_data, header=statsDict) + hvsr_data['stream'].append(radial_trace) + + # Verbose printing + if verbose and not isinstance(hvsr_data, HVSRBatch): + dataINStr = hvsr_data.stream.__str__().split('\n') + for line in dataINStr: + print('\t\t', line) + + if show_az_plot: + hvsr_data['Azimuth_Fig'] = plot_azimuth(hvsr_data=hvsr_data, **plot_azimuth_kwargs) + + hvsr_data['ProcessingStatus']['CalculateAzimuth'] = True + hvsr_data = _check_processing_status(hvsr_data, start_time=start_time, func_name=inspect.stack()[0][3], verbose=verbose) + + return hvsr_data
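+
+# A minimal usage sketch for calculate_azimuth() (illustrative only; assumes the
+# package-level sprit namespace and an HVSRData object already returned by fetch_data()):
+#
+#   import sprit
+#   hv_data = sprit.calculate_azimuth(hv_data, azimuth_angle=15,
+#                                     azimuth_type='multiple', azimuth_unit='degrees')
+#   # hv_data['stream'] now also contains EHR traces, one per 15-degree step from 0-165,
+#   # with the zero-padded azimuth angle stored in each trace's location field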
+ + + +# Quality checks, stability tests, clarity tests +# def check_peaks(hvsr, x, y, index_list, peak, peakm, peakp, hvsr_peaks, stdf, hvsr_log_std, rank, hvsr_band=[0.4, 40], do_rank=False): +
+[docs] +def check_peaks(hvsr_data, hvsr_band=[0.4, 40], peak_selection='max', peak_freq_range=[0.4, 40], azimuth='HV', verbose=False): + """Function to run tests on HVSR peaks to find best one and see if it passes quality checks + + Parameters + ---------- + hvsr_data : dict + Dictionary containing all the calculated information about the HVSR data (i.e., hvsr_out returned from process_hvsr) + hvsr_band : tuple or list, default=[0.4, 40] + 2-item tuple or list with lower and upper limit of frequencies to analyze + peak_selection : str or numeric, default='max' + How to select the "best" peak used in the analysis. For peak_selection="max" (default value), the highest peak within peak_freq_range is used. + For peak_selection='scored', an algorithm is used to select the peak based in part on which peak passes the most SESAME criteria. + If a numeric value is used (e.g., int or float), this should be a frequency value to manually select as the peak of interest. + peak_freq_range : tuple or list, default=[0.4, 40]; + The frequency range within which to check for peaks. If there is an HVSR curve with multiple peaks, this allows the full range of data to be processed while limiting peak picks to likely range. + verbose : bool, default=False + Whether to print results and inputs to terminal. + + Returns + ------- + hvsr_data : HVSRData or HVSRBatch object + Object containing previous input data, plus information about peak tests + """ + orig_args = locals().copy() # Get the initial arguments + + # Update with processing parameters specified previously in input_params, if applicable + if 'processing_parameters' in hvsr_data.keys(): + if 'check_peaks' in hvsr_data['processing_parameters'].keys(): + update_msg = [] + for k, v in hvsr_data['processing_parameters']['check_peaks'].items(): + defaultVDict = dict(zip(inspect.getfullargspec(check_peaks).args[1:], + inspect.getfullargspec(check_peaks).defaults)) + # Manual input to function overrides the imported parameter values + if (not isinstance(v, (HVSRData, HVSRBatch))) and (k in orig_args.keys()) and (orig_args[k]==defaultVDict[k]): + update_msg.append(f'\t\t{k} = {v} (previously {orig_args[k]})') + orig_args[k] = v + + hvsr_band = orig_args['hvsr_band'] + peak_selection = orig_args['peak_selection'] + peak_freq_range = orig_args['peak_freq_range'] + verbose = orig_args['verbose'] + + #if (verbose and 'input_params' not in hvsr_data.keys()) or (verbose and not hvsr_data['batch']): + # if isinstance(hvsr_data, HVSRData) and hvsr_data['batch']: + # pass + # else: + if verbose: + print('\nChecking peaks in the H/V Curve (check_peaks())') + print('\tUsing the following parameters:') + for key, value in orig_args.items(): + if key == 'hvsr_data': + pass + else: + print('\t {}={}'.format(key, value)) + print() + + if 'processing_parameters' in hvsr_data.keys() and 'check_peaks' in hvsr_data['processing_parameters'].keys(): + + if update_msg != []: + update_msg.insert(0, '\tThe following parameters were updated using the processing_parameters attribute:') + for msg_line in update_msg: + print(msg_line) + print() + + # First, divide up for batch or not + if isinstance(hvsr_data, HVSRBatch): + if verbose: + print('\t Running in batch mode') + #If running batch, we'll loop through each site + for site_name in hvsr_data.keys(): + args = orig_args.copy() #Make a copy so we don't accidentally overwrite + args['hvsr_data'] = hvsr_data[site_name] #Get what would normally be the "params" variable for each site + if 
hvsr_data[site_name]['ProcessingStatus']['OverallStatus']: + try: + hvsr_data[site_name] = __check_peaks_batch(**args) #Call another function, that lets us run this function again + except: + if verbose: + print(f"\t{site_name}: check_peaks() unsuccessful. Peaks not checked.") + else: + warnings.warn(f"\t{site_name}: check_peaks() unsuccessful. Peaks not checked.", RuntimeWarning) + + hvsr_data = HVSRBatch(hvsr_data) + else: + HVColIDList = ['_'.join(col_name.split('_')[2:]) for col_name in hvsr_data['hvsr_windows_df'].columns if col_name.startswith('HV_Curves') and 'Log' not in col_name] + HVColIDList[0] = 'HV' + if hvsr_data['ProcessingStatus']['OverallStatus']: + if not hvsr_band: + hvsr_band = [0.4, 40] + + hvsr_data['hvsr_band'] = hvsr_band + + anyK = list(hvsr_data['x_freqs'].keys())[0] + + hvsr_data['PeakReport'] = {} + hvsr_data['BestPeak'] = {} + for i, col_id in enumerate(HVColIDList): + x = hvsr_data['x_freqs'][anyK] # Consistent for all curves + if col_id == 'HV': + y = hvsr_data['hvsr_curve'] # Calculated based on "Use" column + else: + y = hvsr_data['hvsr_az'][col_id] + + scorelist = ['score', 'scored', 'best', 's'] + maxlist = ['max', 'highest', 'm'] + # Convert peak_selection to numeric, get index of nearest value as list item for __init_peaks() + try: + peak_val = float(peak_selection) + index_list = [np.argmin(np.abs(x - peak_val))] + except Exception as e: + # If score method is being used, get index list for __init_peaks() + if peak_selection in scorelist: + index_list = hvsr_data['hvsr_peak_indices'][col_id] #Calculated based on hvsr_curve + elif peak_selection in maxlist: + #Get max index as item in list for __init_peaks() + startInd = np.argmin(np.abs(x - peak_freq_range[0])) + endInd = np.argmin(np.abs(x - peak_freq_range[1])) + if startInd > endInd: + holder = startInd + startInd = endInd + endInd = holder + subArrayMax = np.argmax(y[startInd:endInd]) + + # If max val is in subarray, this will be the same as the max of curve + # Otherwise, it will be the index of the value that is max within peak_freq_range + index_list = [subArrayMax+startInd] + + hvsrp = hvsr_data['hvsrp'][col_id] # Calculated based on "Use" column + hvsrm = hvsr_data['hvsrm'][col_id] # Calculated based on "Use" column + + hvsrPeaks = hvsr_data['hvsr_windows_df'][hvsr_data['hvsr_windows_df']['Use']]['CurvesPeakIndices_'+col_id] + + hvsr_log_std = hvsr_data['hvsr_log_std'][col_id] + peak_freq_range = hvsr_data['peak_freq_range'] + + # Do for hvsr + peak = __init_peaks(x, y, index_list, hvsr_band, peak_freq_range) + + peak = __check_curve_reliability(hvsr_data, peak, col_id) + peak = __check_clarity(x, y, peak, do_rank=True) + + # Do for hvsrp + # Find the relative extrema of hvsrp (hvsr + 1 standard deviation) + if not np.isnan(np.sum(hvsrp)): + index_p = __find_peaks(hvsrp) + else: + index_p = list() + + peakp = __init_peaks(x, hvsrp, index_p, hvsr_band, peak_freq_range) + peakp = __check_clarity(x, hvsrp, peakp, do_rank=True) + + # Do for hvsrm + # Find the relative extrema of hvsrm (hvsr - 1 standard deviation) + if not np.isnan(np.sum(hvsrm)): + index_m = __find_peaks(hvsrm) + else: + index_m = list() + + peakm = __init_peaks(x, hvsrm, index_m, hvsr_band, peak_freq_range) + peakm = __check_clarity(x, hvsrm, peakm, do_rank=True) + + # Get standard deviation of time peaks + stdf = __get_stdf(x, index_list, hvsrPeaks) + + peak = __check_freq_stability(peak, peakm, peakp) + peak = __check_stability(stdf, peak, hvsr_log_std, rank=True) + + hvsr_data['PeakReport'][col_id] = peak + + #Iterate through 
peaks and + # Get the BestPeak based on the peak score + # Calculate whether each peak passes enough tests + curveTests = ['WinLen','SigCycles', 'LowCurveStD'] + peakTests = ['ProminenceLow', 'ProminenceHi', 'AmpClarity', 'FreqStability', 'LowStDev_Freq', 'LowStDev_Amp'] + bestPeakScore = 0 + + for p in hvsr_data['PeakReport'][col_id]: + # Get BestPeak + if p['Score'] > bestPeakScore: + bestPeakScore = p['Score'] + bestPeak = p + + # Calculate if peak passes criteria + cTestsPass = 0 + pTestsPass = 0 + for testName in p['PassList'].keys(): + if testName in curveTests: + if p['PassList'][testName]: + cTestsPass += 1 + elif testName in peakTests: + if p['PassList'][testName]: + pTestsPass += 1 + + if cTestsPass == 3 and pTestsPass >= 5: + p['PeakPasses'] = True + else: + p['PeakPasses'] = False + + # Designate BestPeak in output dict + if len(hvsr_data['PeakReport'][col_id]) == 0: + bestPeak = {} + print(f"No Best Peak identified for {hvsr_data['site']} (azimuth {col_id})") + + hvsr_data['BestPeak'][col_id] = bestPeak + else: + for i, col_id in enumerate(HVColIDList): + if hasattr(hvsr_data, 'BestPeak'): + hvsr_data['BestPeak'][col_id] = {} + else: + print(f"Processing Errors: No Best Peak identified for {hvsr_data['site']} (azimuth {col_id})") + try: + hvsr_data.plot() + except: + pass + + hvsr_data['processing_parameters']['check_peaks'] = {} + for key, value in orig_args.items(): + hvsr_data['processing_parameters']['check_peaks'][key] = value + return hvsr_data
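+
+# A minimal usage sketch for check_peaks() (illustrative only; assumes hv_data has
+# already been processed by process_hvsr(), and uses an arbitrary example frequency range):
+#
+#   hv_results = sprit.process_hvsr(hv_data)
+#   hv_results = sprit.check_peaks(hv_results, peak_selection='scored', peak_freq_range=[0.5, 10])
+#   # Peak test results are stored per azimuth; 'HV' is the azimuthally-combined curve
+#   print(hv_results['BestPeak']['HV']['PeakPasses'])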
+ + + +# Function to export data to .hvsr file (pickled) +
+[docs]
+def export_data(hvsr_data, hvsr_export_path=None, ext='hvsr', verbose=False):
+    """Export data into pickle format that can be read back in using import_data(), so data does not need to be processed each time.
+    The default extension is .hvsr, but it is still a pickled file that can be read in using pickle.load().
+
+    Parameters
+    ----------
+    hvsr_data : HVSRData or HVSRBatch
+        Data to be exported
+    hvsr_export_path : str or filepath object, default = None
+        String or filepath object to be read by pathlib.Path() and/or a with open(hvsr_export_path, 'wb') statement. If None, defaults to the directory of the input data, by default None
+    ext : str, default = 'hvsr'
+        Filepath extension to use for data file, by default 'hvsr'
+    verbose : bool, default = False
+        Whether to print output to the terminal, by default False
+    """
+    def _hvsr_export(_hvsr_data=hvsr_data, _export_path=hvsr_export_path, _ext=ext):
+        fname = f"HVSRData_{_hvsr_data['site']}_{_hvsr_data['hvsr_id']}_pickled.{_ext}"
+        if _export_path is None or _export_path is True:
+            _export_path = _hvsr_data['input_data']
+            _export_path = pathlib.Path(_export_path).with_name(fname)
+        else:
+            _export_path = pathlib.Path(_export_path)
+            if _export_path.is_dir():
+                _export_path = _export_path.joinpath(fname)
+
+        _export_path = str(_export_path)
+        with open(_export_path, 'wb') as f:
+            pickle.dump(_hvsr_data, f)
+
+        print(f"Processed data exported as pickled data to: {_export_path} [~{round(float(pathlib.Path(_export_path).stat().st_size)/2**20, 1)} MB]")
+
+    if isinstance(hvsr_data, HVSRBatch):
+        for sitename in hvsr_data.keys():
+            _hvsr_export(hvsr_data[sitename], hvsr_export_path, ext)
+    elif isinstance(hvsr_data, HVSRData):
+        _hvsr_export(hvsr_data, hvsr_export_path, ext)
+    else:
+        print("Error in data export. Data must be either of type sprit.HVSRData or sprit.HVSRBatch")
+    return
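+
+# A minimal usage sketch for export_data(); the export directory below is a hypothetical placeholder:
+#
+#   sprit.export_data(hv_results, hvsr_export_path='/path/to/exports', ext='hvsr')
+#   # Per the docstring, the .hvsr file is an ordinary pickle, so it can also be reloaded with:
+#   import pickle
+#   with open('/path/to/exports/HVSRData_ExampleSite_..._pickled.hvsr', 'rb') as f:
+#       hv_results = pickle.load(f)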
+ + + +# Function to export reports to disk in various formats +
+[docs]
+def export_report(hvsr_results, report_export_path=None, report_export_format=['pdf'], azimuth='HV', csv_handling='rename', show_report=True, verbose=False):
+    """Function to export reports to disk. Exportable formats include:
+    * 'table': saves a pandas DataFrame as a .csv file
+    * 'plot': saves the matplotlib or plotly plot figure (depending on what is designated via plot_engine) as an image (png by default)
+    * 'print': saves the print report as a .txt file
+    * 'html': saves the html report as a .html file
+    * 'pdf': saves the pdf report as a .pdf file
+
+    Parameters
+    ----------
+    hvsr_results : HVSRData object
+        HVSRData object containing the HVSR data
+    report_export_path : path-like object, optional
+        The path to where the report should be exported.
+        If this is None (default), the report is written to the home directory.
+        If this is True, uses the same directory as the input data, but generates a filename.
+        If this is a directory, generates a filename.
+        If a filename is specified and the extension does not match the report type, the extension is adjusted.
+        Otherwise, this is used directly as the output filepath, by default None
+    report_export_format : str or list, optional
+        The format (or a list of formats) in which to export the report, by default ['pdf'].
+    azimuth : str, optional
+        The azimuth key of the results to use for the report, by default 'HV'.
+    csv_handling : {'rename', 'append', 'overwrite', 'keep'}, optional
+        If table is the report type, this can prevent overwriting data, by default 'rename'.
+        * "rename" (or "keep"): renames the new file to prevent overwrite, appends a digit to the end of the filename
+        * "append": appends the new data to the existing file
+        * "overwrite": overwrites the existing file
+    show_report : bool, optional
+        Whether to show the designated reports that were chosen for export, by default True
+    verbose : bool, optional
+        Whether to print progress and other information to terminal, by default False
+
+    Returns
+    -------
+    HVSRData
+        An HVSRData object that is the same as hvsr_results, but with any additionally generated reports.
+    """
+
+    if type(report_export_format) is str:
+        report_export_format = [report_export_format]
+
+    for ref in report_export_format:
+        if report_export_path is None:
+            print('The export_report(report_export_path) parameter was not specified.')
+            print(f'The report will be saved in the home directory: {pathlib.Path.home()}')
+
+        if ref == 'table':
+            ext = '.csv'
+        elif ref == 'plot':
+            ext = '.png'
+        elif ref == 'print':
+            ext = '.txt'
+        elif ref == 'html':
+            ext = '.html'
+        else:
+            ref = 'pdf'
+            ext = '.pdf'
+
+        sitename = hvsr_results['input_params']['site']
+        fname = f"{sitename}_{hvsr_results['input_params']['acq_date']}_{str(hvsr_results['input_params']['starttime'].time)[:5]}-{str(hvsr_results['input_params']['endtime'].time)[:5]}{ext}"
+        fname = fname.replace(':', '')
+
+        # Initialize output as file in home directory (if not updated)
+        outFile = pathlib.Path.home().joinpath(fname)
+        if report_export_path is True or report_export_path is None:
+            # Check so we don't write in sample directory
+            if pathlib.Path(hvsr_results['input_params']['input_data']) in sampleFileKeyMap.values():
+                if pathlib.Path(os.getcwd()) in sampleFileKeyMap.values():  # Just in case current working directory is also sample directory
+                    inFile = pathlib.Path.home()  # Use the path to user's home if all else fails
+                else:
+                    inFile = pathlib.Path(os.getcwd())
+            else:
+                inFile = pathlib.Path(hvsr_results['input_params']['input_data'])
+
+            if inFile.is_dir():
+                outFile = inFile.joinpath(fname)
+            else:
+                outFile = inFile.with_name(fname)
+        else:
+            if not report_export_path:
+                pass
+            elif pathlib.Path(report_export_path).is_dir():
+                outFile = pathlib.Path(report_export_path).joinpath(fname)
+            else:
+                outFile = pathlib.Path(report_export_path)
+
+        if ref == 'table':
+            if not hasattr(hvsr_results, 'Table_Report'):
+                hvsr_results = _generate_table_report(hvsr_results, azimuth=azimuth, show_table_report=show_report, verbose=verbose)
+            reportDF = hvsr_results['Table_Report']
+
+            # Check if file already exists, and handle as specified in csv_handling
+            if outFile.exists():
+                existFile = pd.read_csv(outFile)
+
+                if csv_handling.lower() == 'append':
+                    # Append report to existing report as new row
+                    reportDF = pd.concat([existFile, reportDF], ignore_index=True, join='inner')
+                elif csv_handling.lower() == 'overwrite':
+                    # Overwrite existing report file
+                    pass
+                else:  # csv_handling.lower() in ['keep', 'rename', or other]
+                    # Rename new report so as not to modify existing report (default handling)
+                    if outFile.stem[-3] == '_' and outFile.stem[-2:].isdigit():
+                        fileDigit = int(outFile.stem[-2:]) + 1
+                    else:
+                        fileDigit = 1
+                    fileDigit = str(fileDigit).zfill(2)
+                    outFile = outFile.with_stem(outFile.stem + '_' + fileDigit)
+
+            # Export to csv using pandas to_csv method
+            try:
+                print(f'\nSaving table report to: {outFile}')
+                reportDF.to_csv(outFile, index_label='ID')
+            except:
+                warnings.warn("Table report not exported. \n\tDataframe to be exported as csv has been saved in hvsr_results['BestPeak']['Report']['Table_Report']", category=RuntimeWarning)
+
+            if show_report or verbose:
+                print('\nTable Report:\n')
+                maxColWidth = 13
+                print(' ', end='')
+                for col in reportDF.columns:
+                    if len(str(col)) > maxColWidth:
+                        colStr = str(col)[:maxColWidth-3]+'...'
+                    else:
+                        colStr = str(col)
+                    print(colStr.ljust(maxColWidth), end=' ')
+                print()  # new line
+                for c in range(len(reportDF.columns) * (maxColWidth+2)):
+                    if c % (maxColWidth+2) == 0:
+                        print('|', end='')
+                    else:
+                        print('-', end='')
+                print('|')  # new line
+                print(' ', end='')  # Small indent at start
+                for row in reportDF.iterrows():
+                    for col in row[1]:
+                        if len(str(col)) > maxColWidth:
+                            colStr = str(col)[:maxColWidth-3]+'...'
+                        else:
+                            colStr = str(col)
+                        print(colStr.ljust(maxColWidth), end=' ')
+                    print()
+        elif ref == 'plot':
+            if not hasattr(hvsr_results, 'HV_Plot'):
+                fig = plot_hvsr(hvsr_results, return_fig=True)
+                hvsr_results['BestPeak'][azimuth]['Report']['HV_Plot'] = hvsr_results['HV_Plot'] = fig
+            else:
+                fig = hvsr_results['HV_Plot']
+
+            if verbose:
+                print(f'\nSaving plot to: {outFile}')
+            fig.savefig(outFile)
+        elif ref == 'print':
+            if not hasattr(hvsr_results, "Print_Report") or hvsr_results['Print_Report'] is None:
+                hvsr_results = _generate_print_report(hvsr_results, azimuth=azimuth, show_print_report=show_report, verbose=verbose)
+            with open(outFile, 'w') as outF:
+                outF.write(hvsr_results['Print_Report'])
+                # Could write more details in the future
+            if show_report or verbose:
+                print(hvsr_results['Print_Report'])
+        elif ref == "html":
+            if not hasattr(hvsr_results, "HTML_Report") or hvsr_results['HTML_Report'] is None:
+                hvsr_results = _generate_html_report(hvsr_results)
+            with open(outFile, 'w') as outF:
+                outF.write(hvsr_results['HTML_Report'])
+        elif ref == "pdf":
+            hvsr_results = _generate_pdf_report(hvsr_results, pdf_report_filepath=report_export_path, show_pdf_report=show_report, verbose=verbose)
+
+    return hvsr_results
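+
+# A minimal usage sketch for export_report(); the report directory below is a hypothetical placeholder:
+#
+#   hv_results = sprit.export_report(hv_results,
+#                                    report_export_path='/path/to/reports',
+#                                    report_export_format=['pdf', 'table'],
+#                                    csv_handling='rename',
+#                                    show_report=False)
+#   # With csv_handling='rename', an existing table report at the same path is left
+#   # untouched and the new .csv gets a zero-padded digit appended to its filename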
+ + + +# **WORKING ON THIS** +# Save default instrument and processing settings to json file(s) +
+[docs]
+def export_settings(hvsr_data, export_settings_path='default', export_settings_type='all', include_location=False, verbose=True):
+    """Save settings to json file
+
+    Parameters
+    ----------
+    hvsr_data : HVSRData
+        Data object from which to read the instrument and processing settings
+    export_settings_path : str, default="default"
+        Where to save the json file(s) containing the settings, by default 'default'.
+        If "default", will save to sprit package resources. Otherwise, set a filepath location where you would like it to be saved.
+        If export_settings_type='all' is selected, a directory should be supplied.
+        Otherwise, it will save in the directory of the provided file, if it exists; if not, it defaults to the home directory.
+    export_settings_type : str, {'all', 'instrument', 'processing'}
+        What kind of settings to save.
+        If 'all', saves all possible types in their respective json files.
+        If 'instrument', saves the instrument settings to their respective file.
+        If 'processing', saves the processing settings to their respective file. By default 'all'
+    include_location : bool, default=False
+        Whether to include the location parameters in the exported settings document. This includes xcoord, ycoord, elevation, elev_unit, and input_crs
+    verbose : bool, default=True
+        Whether to print outputs and information to the terminal
+    """
+    fnameDict = {}
+    fnameDict['instrument'] = "instrument_settings.json"
+    fnameDict['processing'] = "processing_settings.json"
+
+    if export_settings_path == 'default' or export_settings_path is True:
+        settingsPath = resource_dir.joinpath('settings')
+    else:
+        export_settings_path = pathlib.Path(export_settings_path)
+        if not export_settings_path.exists():
+            if not export_settings_path.parent.exists():
+                print(f'The provided value for export_settings_path ({export_settings_path}) does not exist. Saving settings to the home directory: {pathlib.Path.home()}')
+                settingsPath = pathlib.Path.home()
+            else:
+                settingsPath = export_settings_path.parent
+
+        if export_settings_path.is_dir():
+            settingsPath = export_settings_path
+        elif export_settings_path.is_file():
+            settingsPath = export_settings_path.parent
+            fnameDict['instrument'] = export_settings_path.name + "_instrumentSettings.json"
+            fnameDict['processing'] = export_settings_path.name + "_processingSettings.json"
+
+    # Get final filepaths
+    instSetFPath = settingsPath.joinpath(fnameDict['instrument'])
+    procSetFPath = settingsPath.joinpath(fnameDict['processing'])
+
+    # Get settings values
+    instKeys = ["instrument", "net", "sta", "loc", "cha", "depth", "metapath", "hvsr_band"]
+    inst_location_keys = ['xcoord', 'ycoord', 'elevation', 'elev_unit', 'input_crs']
+    procFuncs = [fetch_data, remove_noise, generate_ppsds, process_hvsr, check_peaks, get_report]
+
+    instrument_settings_dict = {}
+    processing_settings_dict = {}
+
+    for k in instKeys:
+        if isinstance(hvsr_data[k], pathlib.PurePath):
+            # For those that are paths and cannot be serialized
+            instrument_settings_dict[k] = hvsr_data[k].as_posix()
+        else:
+            instrument_settings_dict[k] = hvsr_data[k]
+
+    if include_location:
+        for k in inst_location_keys:
+            if isinstance(hvsr_data[k], pathlib.PurePath):
+                # For those that are paths and cannot be serialized
+                instrument_settings_dict[k] = hvsr_data[k].as_posix()
+            else:
+                instrument_settings_dict[k] = hvsr_data[k]
+
+    for func in procFuncs:
+        funcName = func.__name__
+        processing_settings_dict[funcName] = {}
+        for arg in hvsr_data['processing_parameters'][funcName]:
+            if isinstance(hvsr_data['processing_parameters'][funcName][arg], (HVSRBatch, HVSRData)):
+                pass
+            else:
+                processing_settings_dict[funcName][arg] = hvsr_data['processing_parameters'][funcName][arg]
+
+    if verbose:
+        print("Exporting Settings")
+
+    def _format_json(settings_dict, processing=False):
+        # Serialize the settings and format the output for readability
+        jsonString = json.dumps(settings_dict, indent=2)
+        jsonString = jsonString.replace('\n ', ' ')
+        jsonString = jsonString.replace('[ ', '[')
+        jsonString = jsonString.replace('\n ]', ']')
+        if processing:
+            jsonString = jsonString.replace('\n },', '\n\t\t},\n')
+            jsonString = jsonString.replace('{ "', '\n\t\t{\n\t\t"')
+            jsonString = jsonString.replace(', "', ',\n\t\t"')
+            jsonString = jsonString.replace('\n }', '\n\t\t}')
+            jsonString = jsonString.replace(': {', ':\n\t\t\t{')
+        return jsonString
+
+    # Save settings files
+    if export_settings_type.lower() == 'instrument' or export_settings_type.lower() == 'all':
+        jsonString = _format_json(instrument_settings_dict)
+        try:
+            with open(instSetFPath.with_suffix('.inst').as_posix(), 'w') as instSetF:
+                instSetF.write(jsonString)
+        except:
+            # Fall back to the home directory if the chosen location cannot be written to
+            instSetFPath = pathlib.Path.home().joinpath(instSetFPath.name)
+            with open(instSetFPath.with_suffix('.inst').as_posix(), 'w') as instSetF:
+                instSetF.write(jsonString)
+
+        if verbose:
+            print(f"Instrument settings exported to {instSetFPath}")
+            print(f"{jsonString}")
+            print()
+    if export_settings_type.lower() == 'processing' or export_settings_type.lower() == 'all':
+        jsonString = _format_json(processing_settings_dict, processing=True)
+        try:
+            with open(procSetFPath.with_suffix('.proc').as_posix(), 'w') as procSetF:
+                procSetF.write(jsonString)
+        except:
+            # Fall back to the home directory if the chosen location cannot be written to
+            procSetFPath = pathlib.Path.home().joinpath(procSetFPath.name)
+            with open(procSetFPath.with_suffix('.proc').as_posix(), 'w') as procSetF:
+                procSetF.write(jsonString)
+
+        if verbose:
+            print(f"Processing settings exported to {procSetFPath}")
+            print(f"{jsonString}")
+            print()
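+
+# A minimal usage sketch for export_settings(); the directory below is a hypothetical placeholder.
+# Note that, as written above, the instrument and processing files are saved with
+# .inst and .proc suffixes, respectively:
+#
+#   sprit.export_settings(hv_data, export_settings_path='/path/to/settings_dir',
+#                         export_settings_type='all', include_location=False, verbose=True)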
+ + + +# Reads in traces to obspy stream +
+[docs] +def fetch_data(params, source='file', data_export_path=None, data_export_format='mseed', detrend='spline', detrend_order=2, update_metadata=True, plot_input_stream=False, plot_engine='matplotlib', show_plot=True, verbose=False, **kwargs): + """Fetch ambient seismic data from a source to read into obspy stream + + Parameters + ---------- + params : dict + Dictionary containing all the necessary params to get data. + Parameters defined using input_params() function. + source : str, {'raw', 'dir', 'file', 'batch'} + String indicating where/how data file was created. For example, if raw data, will need to find correct channels. + 'raw' finds raspberry shake data, from raw output copied using scp directly from Raspberry Shake, either in folder or subfolders; + 'dir' is used if the day's 3 component files (currently Raspberry Shake supported only) are all 3 contained in a directory by themselves. + 'file' is used if the params['input_data'] specified in input_params() is the direct filepath to a single file to be read directly into an obspy stream. + 'batch' is used to read a list or specified set of seismic files. + Most commonly, a csv file can be read in with all the parameters. Each row in the csv is a separate file. Columns can be arranged by parameter. + data_export_path : None or str or pathlib obj, default=None + If None (or False), data is not trimmed in this function. + Otherwise, this is the directory to save trimmed and exported data. + data_export_format: str='mseed' + If data_export_path is not None, this is the format in which to save the data + detrend : str or bool, default='spline' + If False, data is not detrended. + Otherwise, this should be a string accepted by the type parameter of the obspy.core.trace.Trace.detrend method: https://docs.obspy.org/packages/autogen/obspy.core.trace.Trace.detrend.html + detrend_order : int, default=2 + If detrend parameter is 'spline' or 'polynomial', this is passed directly to the order parameter of obspy.core.trace.Trace.detrend method. + update_metadata : bool, default=True + Whether to update the metadata file, used primarily with Raspberry Shake data which uses a generic inventory file. + plot_input_stream : bool, default=False + Whether to plot the raw input stream. This plot includes a spectrogram (Z component) and the raw (with decimation for speed) plots of each component signal. + plot_engine : str, default='matplotlib' + Which plotting library/engine to use for plotting the Input stream. Options are 'matplotlib', 'plotly', or 'obspy' (not case sensitive). 
+ verbose : bool, default=False + Whether to print outputs and inputs to the terminal + **kwargs + Keywords arguments, primarily for 'batch' and 'dir' sources + + Returns + ------- + params : HVSRData or HVSRBatch object + Same as params parameter, but with an additional "stream" attribute with an obspy data stream with 3 traces: Z (vertical), N (North-south), and E (East-west) + """ + # Get intput paramaters + orig_args = locals().copy() + start_time = datetime.datetime.now() + + # Keep track of any updates made to raw input along the way + update_msg = [] + + # Update with processing parameters specified previously in input_params, if applicable + if 'processing_parameters' in params.keys(): + if 'fetch_data' in params['processing_parameters'].keys(): + defaultVDict = dict(zip(inspect.getfullargspec(fetch_data).args[1:], + inspect.getfullargspec(fetch_data).defaults)) + defaultVDict['kwargs'] = kwargs + for k, v in params['processing_parameters']['fetch_data'].items(): + # Manual input to function overrides the imported parameter values + if k != 'params' and k in orig_args.keys() and orig_args[k]==defaultVDict[k]: + update_msg.append(f'\t\t{k} = {v} (previously {orig_args[k]})') + orig_args[k] = v + + # Update local variables, in case of previously-specified parameters + source=orig_args['source'].lower() + data_export_path=orig_args['data_export_path'] + data_export_format=orig_args['data_export_format'] + detrend=orig_args['detrend'] + detrend_order=orig_args['detrend_order'] + update_metadata=orig_args['update_metadata'] + plot_input_stream=orig_args['plot_input_stream'] + plot_engine=orig_args['plot_engine'] + verbose=orig_args['verbose'] + kwargs=orig_args['kwargs'] + + # Print inputs for verbose setting + if verbose: #source != 'batch' and verbose: + print('\nFetching data (fetch_data())') + for key, value in orig_args.items(): + print('\t {}={}'.format(key, value)) + print() + + if 'processing_parameters' in params.keys() and 'fetch_data' in params['processing_parameters'].keys(): + if update_msg != []: + update_msg.insert(0, '\tThe following parameters were updated using the processing_parameters attribute:') + for msg_line in update_msg: + print(msg_line) + print() + + raspShakeInstNameList = ['raspberry shake', 'shake', 'raspberry', 'rs', 'rs3d', 'rasp. shake', 'raspshake'] + trominoNameList = ['tromino', 'trom', 'tromino 3g', 'tromino 3g+', 'tr', 't'] + + # Check if data is from tromino, and adjust parameters accordingly + if 'trc' in pathlib.Path(str(params['input_data'])).suffix: + if verbose and hasattr(params, 'instrument') and params['instrument'].lower() not in trominoNameList: + print(f"\t Data from tromino detected. 
Changing instrument from {params['instrument']} to 'Tromino'") + params['instrument'] = 'Tromino' + + # Get metadata (inventory/response information) + params = get_metadata(params, update_metadata=update_metadata, source=source) + inv = params['inv'] + date = params['acq_date'] + + # Cleanup for gui input + if isinstance(params['input_data'], (obspy.Stream, obspy.Trace)): + pass + elif '}' in str(params['input_data']): # This is how tkinter gui data comes in + params['input_data'] = params['input_data'].as_posix().replace('{', '') + params['input_data'] = params['input_data'].split('}') + + # Make sure input_data is pointing to an actual file + if isinstance(params['input_data'], list): + for i, d in enumerate(params['input_data']): + params['input_data'][i] = sprit_utils.checkifpath(str(d).strip(), sample_list=SAMPLE_LIST) + dPath = params['input_data'] + elif isinstance(params['input_data'], (obspy.Stream, obspy.Trace)): + pass + elif isinstance(params['input_data'], HVSRData): + dPath = pathlib.Path(params['input_data']['input_data']) + if not isinstance(params['input_data']['stream'], (obspy.Stream, obspy.Trace)): + try: + for k, v in params.items(): + if isinstance(v, (obspy.Trace, obspy.Stream)): + params['input_data']['stream'] = v + elif pathlib.Path(str(v)).exists(): + try: + params['input_data']['stream'] = obspy.read(v) + except Exception as e: + pass + except: + raise RuntimeError(f'The params["input_data"] parameter of fetch_data() was determined to be an HVSRData object, but no data in the "stream" attribute.') + else: + if verbose: + print('\tThe params["input_data"] argument is already an HVSRData obect.') + print("\tChecking metadata then moving on.") + else: + dPath = sprit_utils.checkifpath(params['input_data'], sample_list=SAMPLE_LIST) + + inst = params['instrument'] + + # Need to put dates and times in right formats first + if type(date) is datetime.datetime: + doy = date.timetuple().tm_yday + year = date.year + elif type(date) is datetime.date: + date = datetime.datetime.combine(date, datetime.time(hour=0, minute=0, second=0)) + doy = date.timetuple().tm_yday + year = date.year + elif type(date) is tuple: + if date[0]>366: + raise ValueError('First item in date tuple must be day of year (0-366)', 0) + elif date[1] > datetime.datetime.now().year: + raise ValueError('Second item in date tuple should be year, but given item is in the future', 0) + else: + doy = date[0] + year = date[1] + elif type(date) is str: + if '/' in date: + dateSplit = date.split('/') + elif '-' in date: + dateSplit = date.split('-') + else: + dateSplit = date + + if int(dateSplit[0]) > 31: + date = datetime.datetime(int(dateSplit[0]), int(dateSplit[1]), int(dateSplit[2])) + doy = date.timetuple().tm_yday + year = date.year + elif int(dateSplit[0])<=12 and int(dateSplit[2]) > 31: + warnings.warn("Preferred date format is 'yyyy-mm-dd' or 'yyyy/mm/dd'. Will attempt to parse date.") + date = datetime.datetime(int(dateSplit[2]), int(dateSplit[0]), int(dateSplit[1])) + doy = date.timetuple().tm_yday + year = date.year + else: + warnings.warn("Preferred date format is 'yyyy-mm-dd' or 'yyyy/mm/dd'. 
Cannot parse date.") + elif type(date) is int: + doy = date + year = datetime.datetime.today().year + else: + date = datetime.datetime.now() + doy = date.timetuple().tm_yday + year = date.year + warnings.warn("Did not recognize date, using year {} and day {}".format(year, doy)) + + # Select which instrument we are reading from (requires different processes for each instrument) + # Get any kwargs that are included in obspy.read + obspyReadKwargs = {} + for argName in inspect.getfullargspec(obspy.read)[0]: + if argName in kwargs.keys(): + obspyReadKwargs[argName] = kwargs[argName] + + # Select how reading will be done + if isinstance(params['input_data'], obspy.Stream): + rawDataIN = params['input_data'].copy() + tr = params['input_data'][0] + params['input_data'] = '_'.join([tr.id, str(tr.stats.starttime)[:10], + str(tr.stats.starttime)[11:19], + str(tr.stats.endtime)[11:19]]) + elif isinstance(params['input_data'], obspy.Trace): + rawDataIN = obspy.Stream(params['input_data']) + tr = params['input_data'] + params['input_data'] = '_'.join([tr.id, str(tr.stats.starttime)[:10], + str(tr.stats.starttime)[11:19], + str(tr.stats.endtime)[11:19]]) + elif isinstance(params['input_data'], HVSRData): + rawDataIN = params['input_data']['stream'] + else: + if source=='raw': + try: + if inst.lower() in raspShakeInstNameList: + rawDataIN = __read_RS_file_struct(dPath, source, year, doy, inv, params, verbose=verbose) + elif inst.lower() in trominoNameList: + params['instrument'] = 'Tromino' + params['params']['instrument'] = 'Tromino' + rawDataIN = read_tromino_files(dPath, params, verbose=verbose, **kwargs) + except: + raise RuntimeError(f"Data not fetched for {params['site']}. Check input parameters or the data file.") + elif source=='stream' or isinstance(params, (obspy.Stream, obspy.Trace)): + rawDataIN = params['input_data'].copy() + elif source=='dir': + if inst.lower() in raspShakeInstNameList: + rawDataIN = __read_RS_file_struct(dPath, source, year, doy, inv, params, verbose=verbose) + else: + obspyFiles = {} + for obForm in obspyFormats: + temp_file_glob = pathlib.Path(dPath.as_posix().lower()).glob('.'+obForm.lower()) + for f in temp_file_glob: + currParams = params + currParams['input_data'] = f + + curr_data = fetch_data(params, source='file', #all the same as input, except just reading the one file using the source='file' + data_export_path=data_export_path, data_export_format=data_export_format, detrend=detrend, detrend_order=detrend_order, update_metadata=update_metadata, verbose=verbose, **kwargs) + curr_data.merge() + obspyFiles[f.stem] = curr_data #Add path object to dict, with filepath's stem as the site name + return HVSRBatch(obspyFiles) + elif source == 'file' and str(params['input_data']).lower() not in SAMPLE_LIST: + # Read the file specified by input_data + # Automatically read tromino data + if inst.lower() in trominoNameList or 'trc' in dPath.suffix: + params['instrument'] = 'Tromino' + params['params']['instrument'] = 'Tromino' + if 'trc' in dPath.suffix: + rawDataIN = read_tromino_files(dPath, params, verbose=verbose, **kwargs) + else: + try: + rawDataIN = obspy.read(dPath) + except Exception: + raise ValueError(f"{dPath.suffix} is not a a filetype that can be read by SpRIT (via ObsPy)") + else: + if isinstance(dPath, list) or isinstance(dPath, tuple): + rawStreams = [] + for datafile in dPath: + rawStream = obspy.read(datafile, **obspyReadKwargs) + rawStreams.append(rawStream) #These are actually streams, not traces + for i, stream in enumerate(rawStreams): + if i == 0: + 
rawDataIN = obspy.Stream(stream) #Just in case + else: + rawDataIN = rawDataIN + stream #This adds a stream/trace to the current stream object + elif str(dPath)[:6].lower()=='sample': + pass + else: + rawDataIN = obspy.read(dPath, **obspyReadKwargs)#, starttime=obspy.core.UTCDateTime(params['starttime']), endttime=obspy.core.UTCDateTime(params['endtime']), nearest_sample =True) + #import warnings # For some reason not being imported at the start + #with warnings.catch_warnings(): + #warnings.simplefilter(action='ignore', category=UserWarning) + #rawDataIN.attach_response(inv) + elif source == 'batch' and str(params['input_data']).lower() not in SAMPLE_LIST: + if verbose: + print('\nFetching data (fetch_data())') + batch_data_read_kwargs = {k: v for k, v in kwargs.items() if k in tuple(inspect.signature(batch_data_read).parameters.keys())} + params = batch_data_read(batch_data=params['input_data'], verbose=verbose, **batch_data_read_kwargs) + params = HVSRBatch(params) + return params + elif str(params['input_data']).lower() in SAMPLE_LIST or f"sample{params['input_data'].lower()}" in SAMPLE_LIST: + sample_data_dir = pathlib.Path(pkg_resources.resource_filename(__name__, 'resources/sample_data/')) + if source=='batch': + params['input_data'] = sample_data_dir.joinpath('Batch_SampleData.csv') + params = batch_data_read(batch_data=params['input_data'], batch_type='sample', verbose=verbose) + params = HVSRBatch(params) + return params + elif source=='dir': + params['input_data'] = sample_data_dir.joinpath('Batch_SampleData.csv') + params = batch_data_read(batch_data=params['input_data'], batch_type='sample', verbose=verbose) + params = HVSRBatch(params) + return params + elif source=='file': + params['input_data'] = str(params['input_data']).lower() + + if params['input_data'].lower() in sampleFileKeyMap.keys(): + params['input_data'] = sampleFileKeyMap[params['input_data'].lower()] + else: + params['input_data'] = sample_data_dir.joinpath('SampleHVSRSite1_AM.RAC84.00.2023.046_2023-02-15_1704-1734.MSEED') + + dPath = params['input_data'] + rawDataIN = obspy.read(dPath)#, starttime=obspy.core.UTCDateTime(params['starttime']), endttime=obspy.core.UTCDateTime(params['endtime']), nearest_sample =True) + #import warnings + #with warnings.catch_warnings(): + # warnings.simplefilter(action='ignore', category=UserWarning) + # rawDataIN.attach_response(inv) + else: + # Last try if source cannot be read correctly + try: + rawDataIN = obspy.read(dPath) + except: + RuntimeError(f'source={source} not recognized, and input_data cannot be read using obspy.read()') + + #Get metadata from the data itself, if not reading raw data + try: + # If the data already exists (not reading in raw from RS, for example), get the parameters from the data + dataIN = rawDataIN.copy() + if source != 'raw': + # Use metadata from file for updating: + # site + site_default = inspect.signature(input_params).parameters['site'].default + if params['site'] == site_default and params['site'] != dPath.stem: + if isinstance(dPath, (list, tuple)): + dPath = dPath[0] + params['site'] = dPath.stem + params['params']['site'] = dPath.stem + if verbose: + print(f"\t\tSite name updated to {params['site']}") + + # network + net_default = inspect.signature(input_params).parameters['network'].default + if params['net'] == net_default and net_default != dataIN[0].stats.network: + params['net'] = dataIN[0].stats.network + params['params']['net'] = dataIN[0].stats.network + if verbose: + print(f"\t\tNetwork name updated to {params['net']}") + + # 
station + sta_default = inspect.signature(input_params).parameters['station'].default + if str(params['sta']) == sta_default and str(params['sta']) != dataIN[0].stats.station: + params['sta'] = dataIN[0].stats.station + params['station'] = dataIN[0].stats.station + params['params']['sta'] = dataIN[0].stats.station + params['params']['station'] = dataIN[0].stats.station + if verbose: + print(f"\t\tStation name updated to {params['sta']}") + + # loc + loc_default = inspect.signature(input_params).parameters['loc'].default + if params['loc'] == loc_default and params['loc'] != dataIN[0].stats.location: + params['loc'] = dataIN[0].stats.location + params['params']['loc'] = dataIN[0].stats.location + if verbose: + print(f"\t\tLocation updated to {params['loc']}") + + # channels + channelList = [] + cha_default = inspect.signature(input_params).parameters['channels'].default + if str(params['cha']) == cha_default: + for tr in dataIN: + if tr.stats.channel not in channelList: + channelList.append(tr.stats.channel) + channelList.sort(reverse=True) #Just so z is first, just in case + if set(params['cha']) != set(channelList): + params['cha'] = channelList + params['params']['cha'] = channelList + if verbose: + print(f"\t\tChannels updated to {params['cha']}") + + # Acquisition date + acqdate_default = inspect.signature(input_params).parameters['acq_date'].default + if str(params['acq_date']) == acqdate_default and params['acq_date'] != dataIN[0].stats.starttime.date: + params['acq_date'] = dataIN[0].stats.starttime.date + if verbose: + print(f"\t\tAcquisition Date updated to {params['acq_date']}") + + # starttime + today_Starttime = obspy.UTCDateTime(datetime.datetime(year=datetime.date.today().year, month=datetime.date.today().month, + day = datetime.date.today().day, + hour=0, minute=0, second=0, microsecond=0)) + maxStarttime = datetime.datetime(year=params['acq_date'].year, month=params['acq_date'].month, day=params['acq_date'].day, + hour=0, minute=0, second=0, microsecond=0, tzinfo=datetime.timezone.utc) + stime_default = inspect.signature(input_params).parameters['starttime'].default + str(params['starttime']) == str(stime_default) + if str(params['starttime']) == str(stime_default): + for tr in dataIN.merge(): + currTime = datetime.datetime(year=tr.stats.starttime.year, month=tr.stats.starttime.month, day=tr.stats.starttime.day, + hour=tr.stats.starttime.hour, minute=tr.stats.starttime.minute, + second=tr.stats.starttime.second, microsecond=tr.stats.starttime.microsecond, tzinfo=datetime.timezone.utc) + if currTime > maxStarttime: + maxStarttime = currTime + + newStarttime = obspy.UTCDateTime(datetime.datetime(year=params['acq_date'].year, month=params['acq_date'].month, + day = params['acq_date'].day, + hour=maxStarttime.hour, minute=maxStarttime.minute, + second=maxStarttime.second, microsecond=maxStarttime.microsecond)) + if params['starttime'] != newStarttime: + params['starttime'] = newStarttime + params['params']['starttime'] = newStarttime + if verbose: + print(f"\t\tStarttime updated to {params['starttime']}") + + # endttime + today_Endtime = obspy.UTCDateTime(datetime.datetime(year=datetime.date.today().year, month=datetime.date.today().month, + day = datetime.date.today().day, + hour=23, minute=59, second=59, microsecond=999999)) + tomorrow_Endtime = today_Endtime + (60*60*24) + minEndtime = datetime.datetime.now(tz=datetime.timezone.utc)#.replace(tzinfo=datetime.timezone.utc)#(hour=23, minute=59, second=59, microsecond=999999) + etime_default = 
inspect.signature(input_params).parameters['endtime'].default + if str(params['endtime']) == etime_default or str(params['endtime']) == tomorrow_Endtime: + for tr in dataIN.merge(): + currTime = datetime.datetime(year=tr.stats.endtime.year, month=tr.stats.endtime.month, day=tr.stats.endtime.day, + hour=tr.stats.endtime.hour, minute=tr.stats.endtime.minute, + second=tr.stats.endtime.second, microsecond=tr.stats.endtime.microsecond, tzinfo=datetime.timezone.utc) + if currTime < minEndtime: + minEndtime = currTime + newEndtime = obspy.UTCDateTime(datetime.datetime(year=minEndtime.year, month=minEndtime.month, + day = minEndtime.day, + hour=minEndtime.hour, minute=minEndtime.minute, + second=minEndtime.second, microsecond=minEndtime.microsecond, tzinfo=datetime.timezone.utc)) + + if params['endtime'] != newEndtime: + params['endtime'] = newEndtime + params['params']['endtime'] = newEndtime + if verbose: + print(f"\t\tEndtime updated to {params['endtime']}") + + # HVSR_ID (derived) + project = params['project'] + if project is None: + proj_id = '' + else: + proj_id = str(project)+'-' + + params['hvsr_id'] = f"{proj_id}{params['acq_date'].strftime('%Y%m%d')}-{params['starttime'].strftime('%H%M')}-{params['station']}" + params['params']['hvsr_id'] = f"{proj_id}{params['acq_date'].strftime('%Y%m%d')}-{params['starttime'].strftime('%H%M')}-{params['station']}" + + # Clean up + dataIN = dataIN.split() + dataIN = dataIN.trim(starttime=params['starttime'], endtime=params['endtime']) + dataIN.merge() + except Exception as e: + raise RuntimeError(f'Data not fetched. \n{e}.\n\ntCheck your input parameters or the data file.') + + # Get and update metadata + params = get_metadata(params, update_metadata=update_metadata, source=source) + inv = params['inv'] + + # Trim and save data as specified + if data_export_path=='None': + data_export_path=None + if not data_export_path: + pass + else: + if isinstance(params, HVSRBatch): + pass + else: + dataIN = _trim_data(input=params, stream=dataIN, export_dir=data_export_path, source=source, data_export_format=data_export_format) + + # Split data if masked array (if there are gaps)...detrending cannot be done without + for tr in dataIN: + if isinstance(tr.data, np.ma.masked_array): + dataIN = dataIN.split() + #Splits entire stream if any trace is masked_array + break + + # Detrend data + if isinstance(params, HVSRBatch): + pass + else: + dataIN = __detrend_data(input=dataIN, detrend=detrend, detrend_order=detrend_order, verbose=verbose, source=source) + + # Remerge data + dataIN = dataIN.merge(method=1) + + # Plot the input stream? 
+ if plot_input_stream: + if plot_engine.lower() in ['plotly', 'plty', 'p']: + if 'spectrogram_component' in kwargs.keys(): + specComp = kwargs['spectrogram_component'] + else: + specComp = 'Z' + params['InputPlot'] = sprit_plot.plot_preview(hv_data=params, stream=dataIN, spectrogram_component=specComp, show_plot=show_plot, return_fig=True) + elif plot_engine.lower() in ['obspy', 'ospby', 'osbpy', 'opsby', 'opspy', 'o']: + params['InputPlot'] = dataIN.plot(method='full', linewidth=0.25, handle=True, show=False) + if show_plot: + plt.show() + else: + plt.close() + else: + try: + params['InputPlot'] = _plot_specgram_stream(stream=dataIN, params=params, component='Z', stack_type='linear', detrend='mean', dbscale=True, fill_gaps=None, ylimstd=3, return_fig=True, fig=None, ax=None, show_plot=False) + + #_get_removed_windows(input=dataIN, fig=params['InputPlot'][0], ax=params['InputPlot'][1], lineArtist =[], winArtist = [], existing_lineArtists=[], existing_xWindows=[], exist_win_format='matplotlib', keep_line_artists=True, time_type='matplotlib', show_plot=True) + if show_plot: + plt.show() + else: + plt.close() + except Exception as e: + print(f'Error with default plotting method: {e}.\n Falling back to internal obspy plotting method') + params['InputPlot'] = dataIN.plot(method='full', linewidth=0.25, handle=True, show=False) + if show_plot: + plt.show() + else: + plt.close() + else: + params['InputPlot'] = None + + # Sort channels (make sure Z is first, makes things easier later) + if isinstance(params, HVSRBatch): + pass + else: + dataIN = _sort_channels(input=dataIN, source=source, verbose=verbose) + + # Clean up the ends of the data unless explicitly specified to do otherwise (this is a kwarg, not a parameter) + if 'clean_ends' not in kwargs.keys(): + clean_ends = True + else: + clean_ends = kwargs['clean_ends'] + + if clean_ends: + + maxStarttime = datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(days=36500) #100 years ago + minEndtime = datetime.datetime.now(datetime.timezone.utc) + + for tr in dataIN: + currStarttime = datetime.datetime(year=tr.stats.starttime.year, month=tr.stats.starttime.month, day=tr.stats.starttime.day, + hour=tr.stats.starttime.hour, minute=tr.stats.starttime.minute, + second=tr.stats.starttime.second, microsecond=tr.stats.starttime.microsecond, tzinfo=datetime.timezone.utc) + if currStarttime > maxStarttime: + maxStarttime = currStarttime + + currEndtime = datetime.datetime(year=tr.stats.endtime.year, month=tr.stats.endtime.month, day=tr.stats.endtime.day, + hour=tr.stats.endtime.hour, minute=tr.stats.endtime.minute, + second=tr.stats.endtime.second, microsecond=tr.stats.endtime.microsecond, tzinfo=datetime.timezone.utc) + + if currEndtime < minEndtime: + minEndtime = currEndtime + + maxStarttime = obspy.UTCDateTime(maxStarttime) + minEndtime = obspy.UTCDateTime(minEndtime) + dataIN = dataIN.split() + for tr in dataIN: + tr.trim(starttime=maxStarttime, endtime=minEndtime) + pass + dataIN.merge() + + params['batch'] = False # Set False by default, will get corrected later if batch + params['input_stream'] = dataIN.copy() # Original stream as read + params['stream'] = dataIN.copy() # Stream that may be modified later + + if 'processing_parameters' not in params.keys(): + params['processing_parameters'] = {} + params['processing_parameters']['fetch_data'] = {} + for key, value in orig_args.items(): + params['processing_parameters']['fetch_data'][key] = value + + # Attach response data to stream and get paz (for PPSD later) + # Check if response can 
be attached + params['stream'].attach_response(params['inv']) + + responseMatch = {} + for trace in params['stream']: + k = trace.stats.component + responseMatch[k] = False + + for sta in params['inv'].networks[0].stations: # Assumes only one network per inst + hasCha = False + hasLoc = False + hasSta = False + isStarted= False + notEnded = False + + # Check station + if sta.code == params['stream'][0].stats.station: + hasSta = True + else: + continue + + # Check Channel + for cha in sta: + if cha.code==trace.stats.channel: + hasCha = True + + # Check location + if cha.location_code == trace.stats.location: + hasLoc = True + + + # Check time + if (cha.start_date is None or cha.start_date <= tr.stats.starttime): + isStarted = True + + if (cha.end_date is None or cha.end_date >= tr.stats.endtime): + notEnded = True + + + if all([hasSta, hasCha, hasLoc, isStarted, notEnded]): + responseMatch[k] = True + + if responseMatch[k] is not True: + responseMatch[k] = {'Station': (hasSta,[sta.code for sta in params['inv'].networks[0].stations]), + 'Channel': (hasCha, [cha.code for cha in sta for sta in params['inv'].networks[0].stations]), + 'Location': (hasLoc, [cha.location_code for cha in sta for sta in params['inv'].networks[0].stations]), + 'Starttime':(isStarted, [cha.start_date for cha in sta for sta in params['inv'].networks[0].stations]), + 'Endtime': (notEnded, [cha.end_date for cha in sta for sta in params['inv'].networks[0].stations])} + + metadataMatchError = False + for comp, matchItems in responseMatch.items(): + if matchItems is not True: + metadataMatchError = True + errorMsg = 'The following items in your data need to be matched in the instrument response/metadata:' + for matchType, match in matchItems.items(): + if match[0] is False: + errorMsg = errorMsg + f"\n\t{matchType} does not match {match[1]} correctly for component {comp}: {params['stream'].select(component=comp)[0].stats[matchType.lower()]}" + + if metadataMatchError: + print(errorMsg) + raise ValueError('Instrument Response/Metadata does not match input data and cannot be used!!\n'+errorMsg) + + try: + params['stream'].attach_response(params['inv']) + for tr in params['stream']: + cmpnt = tr.stats.component + + params['paz'][cmpnt]['poles'] = tr.stats.response.get_paz().poles + params['paz'][cmpnt]['zeros'] = tr.stats.response.get_paz().zeros + params['paz'][cmpnt]['sensitivity'] = tr.stats.response.get_paz().stage_gain + params['paz'][cmpnt]['gain'] = tr.stats.response.get_paz().normalization_factor + except Exception: + raise ValueError("Metadata missing, incomplete, or incorrect") + + params['ProcessingStatus']['FetchDataStatus'] = True + if verbose and not isinstance(params, HVSRBatch): + dataINStr = dataIN.__str__().split('\n') + for line in dataINStr: + print('\t\t', line) + + params = _check_processing_status(params, start_time=start_time, func_name=inspect.stack()[0][3], verbose=verbose) + + return params
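+
+# A minimal usage sketch for fetch_data(); the file path below is a hypothetical placeholder:
+#
+#   import sprit
+#   hv_data = sprit.input_params(input_data='/path/to/data.mseed', site='ExampleSite')
+#   hv_data = sprit.fetch_data(hv_data, source='file', detrend='spline', detrend_order=2, verbose=True)
+#   print(hv_data['stream'])  # obspy Stream with Z, N, and E traces (sorted with Z first)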
+ + + +# Generate PPSDs for each channel +
+[docs] +def generate_ppsds(hvsr_data, azimuthal_ppsds=False, verbose=False, **ppsd_kwargs): + """Generates PPSDs for each channel + + Channels need to be in Z, N, E order + Info on PPSD creation here: https://docs.obspy.org/packages/autogen/obspy.signal.spectral_estimation.PPSD.html + + Parameters + ---------- + hvsr_data : dict, HVSRData object, or HVSRBatch object + Data object containing all the parameters and other data of interest (stream and paz, for example) + azimuthal_ppsds : bool, default=False + Whether to generate PPSDs for azimuthal data + verbose : bool, default=True + Whether to print inputs and results to terminal + **ppsd_kwargs : dict + Dictionary with keyword arguments that are passed directly to obspy.signal.PPSD. + If the following keywords are not specified, their defaults are amended in this function from the obspy defaults for its PPSD function. Specifically: + - ppsd_length defaults to 30 (seconds) here instead of 3600 + - skip_on_gaps defaults to True instead of False + - period_step_octaves defaults to 0.03125 instead of 0.125 + + Returns + ------- + ppsds : HVSRData object + Dictionary containing entries with ppsds for each channel + """ + + # First, divide up for batch or not + orig_args = locals().copy() # Get the initial arguments + start_time = datetime.datetime.now() + + ppsd_kwargs_sprit_defaults = ppsd_kwargs.copy() + #Set defaults here that are different than obspy defaults + if 'ppsd_length' not in ppsd_kwargs.keys(): + ppsd_kwargs_sprit_defaults['ppsd_length'] = 30.0 + if 'skip_on_gaps' not in ppsd_kwargs.keys(): + ppsd_kwargs_sprit_defaults['skip_on_gaps'] = True + if 'period_step_octaves' not in ppsd_kwargs.keys(): + ppsd_kwargs_sprit_defaults['period_step_octaves'] = 0.03125 + if 'period_limits' not in ppsd_kwargs.keys(): + if 'hvsr_band' in hvsr_data.keys(): + ppsd_kwargs_sprit_defaults['period_limits'] = [1/hvsr_data['hvsr_band'][1], 1/hvsr_data['hvsr_band'][0]] + elif 'input_params' in hvsr_data.keys() and 'hvsr_band' in hvsr_data['input_params'].keys(): + ppsd_kwargs_sprit_defaults['period_limits'] = [1/hvsr_data['input_params']['hvsr_band'][1], 1/hvsr_data['input_params']['hvsr_band'][0]] + else: + ppsd_kwargs_sprit_defaults['period_limits'] = [1/40, 1/0.4] + else: + if verbose: + print(f"\t\tUpdating hvsr_band to band specified by period_limits={ppsd_kwargs['period_limits']}") + + if 'hvsr_band' in hvsr_data.keys(): + if ppsd_kwargs['period_limits'] is None: + ppsd_kwargs['period_limits'] = np.round([1/hvsr_data['hvsr_band'][1], 1/hvsr_data['hvsr_band'][0]], 3).tolist() + else: + hvsr_data['hvsr_band'] = np.round([1/ppsd_kwargs['period_limits'][1], 1/ppsd_kwargs['period_limits'][0]], 2).tolist() + + if 'input_params' in hvsr_data.keys() and 'hvsr_band' in hvsr_data['input_params'].keys(): + hvsr_data['input_params']['hvsr_band'] = np.round([1/ppsd_kwargs['period_limits'][1], 1/ppsd_kwargs['period_limits'][0]], 2).tolist() + + + #Get Probablistic power spectral densities (PPSDs) + #Get default args for function + def get_default_args(func): + signature = inspect.signature(func) + return { + k: v.default + for k, v in signature.parameters.items() + if v.default is not inspect.Parameter.empty + } + + ppsd_kwargs = get_default_args(PPSD) + ppsd_kwargs.update(ppsd_kwargs_sprit_defaults) # Update with sprit defaults, or user input + orig_args['ppsd_kwargs'] = ppsd_kwargs + + # Update with processing parameters specified previously in input_params, if applicable + if 'processing_parameters' in hvsr_data.keys(): + if 'generate_ppsds' in 
hvsr_data['processing_parameters'].keys(): + defaultVDict = dict(zip(inspect.getfullargspec(generate_ppsds).args[1:], + inspect.getfullargspec(generate_ppsds).defaults)) + defaultVDict['ppsd_kwargs'] = ppsd_kwargs + update_msg = [] + for k, v in hvsr_data['processing_parameters']['generate_ppsds'].items(): + # Manual input to function overrides the imported parameter values + if not isinstance(v, (HVSRData, HVSRBatch)) and (k in orig_args.keys()) and (orig_args[k] == defaultVDict[k]): + update_msg.append(f'\t\t{k} = {v} (previously {orig_args[k]})') + orig_args[k] = v + + azimuthal_ppsds = orig_args['azimuthal_ppsds'] + verbose = orig_args['verbose'] + ppsd_kwargs = orig_args['ppsd_kwargs'] + + # if (verbose and isinstance(hvsr_data, HVSRBatch)) or (verbose and not hvsr_data['batch']): + if verbose: + print('\nGenerating Probabilistic Power Spectral Densities (generate_ppsds())') + print('\tUsing the following parameters:') + for key, value in orig_args.items(): + if key == 'hvsr_data': + pass + else: + print('\t {}={}'.format(key, value)) + print() + + if 'processing_parameters' in hvsr_data.keys() and 'generate_ppsds' in hvsr_data['processing_parameters'].keys(): + if update_msg != []: + update_msg.insert(0, '\tThe following parameters were updated using the processing_parameters attribute:') + for msg_line in update_msg: + print(msg_line) + print() + + if isinstance(hvsr_data, HVSRBatch): + #If running batch, we'll loop through each one + for site_name in hvsr_data.keys(): + args = orig_args.copy() #Make a copy so we don't accidentally overwrite + individual_params = hvsr_data[site_name] #Get what would normally be the "hvsr_data" variable for each site + args['hvsr_data'] = individual_params #reset the hvsr_data parameter we originally read in to an individual site hvsr_data + #args['hvsr_data']['batch'] = False #Set to false, since only running this time + if hvsr_data[site_name]['ProcessingStatus']['OverallStatus']: + try: + hvsr_data[site_name] = __generate_ppsds_batch(**args) #Call another function, that lets us run this function again + except: + hvsr_data[site_name]['ProcessingStatus']['PPSDStatus']=False + hvsr_data[site_name]['ProcessingStatus']['OverallStatus'] = False + else: + hvsr_data[site_name]['ProcessingStatus']['PPSDStatus']=False + hvsr_data[site_name]['ProcessingStatus']['OverallStatus'] = False + + try: + sprit_tkinter_ui.update_progress_bars(prog_percent=5) + except Exception as e: + pass + #print(e) + return hvsr_data + + paz = hvsr_data['paz'] + stream = hvsr_data['stream'] + + # Get ppsds of e component + eStream = stream.select(component='E') + estats = eStream.traces[0].stats + ppsdE = PPSD(estats, paz['E'], **ppsd_kwargs) + ppsdE.add(eStream) + + # Get ppsds of n component + nStream = stream.select(component='N') + nstats = nStream.traces[0].stats + ppsdN = PPSD(nstats, paz['N'], **ppsd_kwargs) + ppsdN.add(nStream) + + # Get ppsds of z component + zStream = stream.select(component='Z') + zstats = zStream.traces[0].stats + ppsdZ = PPSD(zstats, paz['Z'], **ppsd_kwargs) + ppsdZ.add(zStream) + + # Get ppsds of R components (azimuthal data) + has_az = False + ppsds = {'Z':ppsdZ, 'E':ppsdE, 'N':ppsdN} + rStream = stream.select(component='R') + for curr_trace in stream: + if 'R' in curr_trace.stats.channel: + curr_stats = curr_trace.stats + ppsd_curr = PPSD(curr_stats, paz['E'], **ppsd_kwargs) + has_az = True + ppsdName = curr_trace.stats.location + ppsd_curr.add(rStream) + ppsds[ppsdName] = ppsd_curr + + # Add to the input dictionary, so that some items can be 
manipulated later on, and original can be saved + hvsr_data['ppsds_obspy'] = ppsds + hvsr_data['ppsds'] = {} + anyKey = list(hvsr_data['ppsds_obspy'].keys())[0] + + # Get ppsd class members + members = [mems for mems in dir(hvsr_data['ppsds_obspy'][anyKey]) if not callable(mems) and not mems.startswith("_")] + for k in ppsds.keys(): + hvsr_data['ppsds'][k] = {} + + #Get lists/arrays so we can manipulate data later and copy everything over to main 'ppsds' subdictionary (convert lists to np.arrays for consistency) + listList = ['times_data', 'times_gaps', 'times_processed','current_times_used', 'psd_values'] #Things that need to be converted to np.array first, for consistency + timeKeys= ['times_processed','current_times_used','psd_values'] + timeDiffWarn = True + dfList = [] + time_data = {} + time_dict = {} + for m in members: + for k in hvsr_data['ppsds'].keys(): + hvsr_data['ppsds'][k][m] = getattr(hvsr_data['ppsds_obspy'][k], m) + if m in listList: + hvsr_data['ppsds'][k][m] = np.array(hvsr_data['ppsds'][k][m]) + + if str(m)=='times_processed': + unique_times = np.unique(np.array([hvsr_data['ppsds']['Z'][m], + hvsr_data['ppsds']['E'][m], + hvsr_data['ppsds']['N'][m]])) + + common_times = [] + for currTime in unique_times: + if currTime in hvsr_data['ppsds']['Z'][m]: + if currTime in hvsr_data['ppsds']['E'][m]: + if currTime in hvsr_data['ppsds']['N'][m]: + common_times.append(currTime) + + cTimeIndList = [] + for cTime in common_times: + ZArr = hvsr_data['ppsds']['Z'][m] + EArr = hvsr_data['ppsds']['E'][m] + NArr = hvsr_data['ppsds']['N'][m] + + cTimeIndList.append([int(np.where(ZArr == cTime)[0][0]), + int(np.where(EArr == cTime)[0][0]), + int(np.where(NArr == cTime)[0][0])]) + + # Make sure number of time windows is the same between PPSDs (this can happen with just a few slightly different number of samples) + if m in timeKeys: + if str(m) != 'times_processed': + time_data[str(m)] = (hvsr_data['ppsds']['Z'][m], hvsr_data['ppsds']['E'][m], hvsr_data['ppsds']['N'][m]) + + tSteps_same = hvsr_data['ppsds']['Z'][m].shape[0] == hvsr_data['ppsds']['E'][m].shape[0] == hvsr_data['ppsds']['N'][m].shape[0] + + if not tSteps_same: + shortestTimeLength = min(hvsr_data['ppsds']['Z'][m].shape[0], hvsr_data['ppsds']['E'][m].shape[0], hvsr_data['ppsds']['N'][m].shape[0]) + + maxPctDiff = 0 + for comp in hvsr_data['ppsds'].keys(): + currCompTimeLength = hvsr_data['ppsds'][comp][m].shape[0] + timeLengthDiff = currCompTimeLength - shortestTimeLength + percentageDiff = timeLengthDiff / currCompTimeLength + if percentageDiff > maxPctDiff: + maxPctDiff = percentageDiff + + for comp in hvsr_data['ppsds'].keys(): + while hvsr_data['ppsds'][comp][m].shape[0] > shortestTimeLength: + hvsr_data['ppsds'][comp][m] = hvsr_data['ppsds'][comp][m][:-1] + + + if maxPctDiff > 0.05 and timeDiffWarn: + warnings.warn(f"\t Number of ppsd time windows between different components is significantly different: {round(maxPctDiff*100,2)}% > 5%. Last windows will be trimmed.") + elif verbose and timeDiffWarn: + print(f"\t Number of ppsd time windows between different components is different by {round(maxPctDiff*100,2)}%. 
Last window(s) of components with larger number of ppsd windows will be trimmed.") + timeDiffWarn = False #So we only do this warning once, even though there may be multiple arrays that need to be trimmed + + for i, currTStep in enumerate(cTimeIndList): + colList = [] + currTStepList = [] + colList.append('Use') + currTStepList.append(np.ones_like(common_times[i]).astype(bool)) + for tk in time_data.keys(): + if 'current_times_used' not in tk: + for i, k in enumerate(hvsr_data['ppsds'].keys()): + if k.lower() in ['z', 'e', 'n']: + colList.append(str(tk)+'_'+k) + currTStepList.append(time_data[tk][i][currTStep[i]]) + + dfList.append(currTStepList) + hvsrDF = pd.DataFrame(dfList, columns=colList) + if verbose: + print(f"\t\thvsr_windows_df created with columns: {', '.join(hvsrDF.columns)}") + hvsrDF['Use'] = hvsrDF['Use'].astype(bool) + # Add azimuthal ppsds values + for k in hvsr_data['ppsds'].keys(): + if k.upper() not in ['Z', 'E', 'N']: + hvsrDF['psd_values_'+k] = hvsr_data['ppsds'][k]['psd_values'].tolist() + + hvsrDF['TimesProcessed_Obspy'] = common_times + hvsrDF['TimesProcessed_ObspyEnd'] = hvsrDF['TimesProcessed_Obspy'] + ppsd_kwargs['ppsd_length'] + # colList.append('TimesProcessed_Obspy') + # currTStepList.append(common_times[i]) + # Add other times (for start times) + def convert_to_datetime(obspyUTCDateTime): + return obspyUTCDateTime.datetime.replace(tzinfo=datetime.timezone.utc) + + def convert_to_mpl_dates(obspyUTCDateTime): + return obspyUTCDateTime.matplotlib_date + + hvsrDF['TimesProcessed'] = hvsrDF['TimesProcessed_Obspy'].apply(convert_to_datetime) + hvsrDF['TimesProcessed_End'] = hvsrDF['TimesProcessed'] + datetime.timedelta(days=0, seconds=ppsd_kwargs['ppsd_length']) + hvsrDF['TimesProcessed_MPL'] = hvsrDF['TimesProcessed_Obspy'].apply(convert_to_mpl_dates) + hvsrDF['TimesProcessed_MPLEnd'] = hvsrDF['TimesProcessed_MPL'] + (ppsd_kwargs['ppsd_length']/86400) + + # Take care of existing time gaps, in case not taken care of previously + for gap in hvsr_data['ppsds']['Z']['times_gaps']: + hvsrDF['Use'] = (hvsrDF['TimesProcessed_MPL'].gt(gap[1].matplotlib_date))| \ + (hvsrDF['TimesProcessed_MPLEnd'].lt(gap[0].matplotlib_date)).astype(bool) + hvsrDF.set_index('TimesProcessed', inplace=True) + hvsr_data['hvsr_windows_df'] = hvsrDF + + if 'x_windows_out' in hvsr_data.keys(): + if verbose: + print("\t\tRemoving Noisy windows from hvsr_windows_df.") + hvsr_data = __remove_windows_from_df(hvsr_data, verbose=verbose) + #for window in hvsr_data['x_windows_out']: + # print(window) + # hvsrDF['Use'] = (hvsrDF['TimesProcessed_MPL'][hvsrDF['Use']].lt(window[0]) & hvsrDF['TimesProcessed_MPLEnd'][hvsrDF['Use']].lt(window[0]) )| \ + # (hvsrDF['TimesProcessed_MPL'][hvsrDF['Use']].gt(window[1]) & hvsrDF['TimesProcessed_MPLEnd'][hvsrDF['Use']].gt(window[1])).astype(bool) + #hvsrDF['Use'] = hvsrDF['Use'].astype(bool) + + + # Create dict entry to keep track of how many outlier hvsr curves are removed (2-item list with [0]=current number, [1]=original number of curves) + hvsr_data['tsteps_used'] = [hvsrDF['Use'].sum(), hvsrDF['Use'].shape[0]] + #hvsr_data['tsteps_used'] = [hvsr_data['ppsds']['Z']['times_processed'].shape[0], hvsr_data['ppsds']['Z']['times_processed'].shape[0]] + + hvsr_data['tsteps_used'][0] = hvsr_data['ppsds']['Z']['current_times_used'].shape[0] + + hvsr_data = sprit_utils.make_it_classy(hvsr_data) + + if 'processing_parameters' not in hvsr_data.keys(): + hvsr_data['processing_parameters'] = {} + hvsr_data['processing_parameters']['generate_ppsds'] = {} + for key, value in
orig_args.items(): + hvsr_data['processing_parameters']['generate_ppsds'][key] = value + + + hvsr_data['ProcessingStatus']['PPSDStatus'] = True + hvsr_data = _check_processing_status(hvsr_data, start_time=start_time, func_name=inspect.stack()[0][3], verbose=verbose) + return hvsr_data
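+ + +# Illustrative usage sketch for generate_ppsds() (added for documentation; not part of the original module). The file path is a hypothetical placeholder, the package-level names follow the sprit.* references used in the docstrings above, and the keyword values simply restate the SpRIT defaults documented above. +def _example_generate_ppsds(): + import sprit + hv = sprit.input_params(input_data='/path/to/data.mseed', site='Example Site') + hv = sprit.fetch_data(hv) # fetch_data arguments omitted for brevity + # ppsd_kwargs are passed through to obspy's PPSD; these values mirror the sprit defaults + hv = sprit.generate_ppsds(hv, ppsd_length=30.0, skip_on_gaps=True, period_step_octaves=0.03125, verbose=True) + return hv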
+ + + +# Gets the metadata for Raspberry Shake, specifically for 3D v.7 +
+[docs] +def get_metadata(params, write_path='', update_metadata=True, source=None, **read_inventory_kwargs): + """Get metadata and calculate or get paz parameter needed for PPSD + + Parameters + ---------- + params : dict + Dictionary containing all the input and other parameters needed for processing + Output from input_params() function + write_path : str + String with output filepath of where to write updated inventory or metadata file + If not specified, does not write file + update_metadata : bool + Whether to update the metadata file itself, or just read as-is. If using provided raspberry shake metadata file, select True. + source : str, default=None + This passes the source variable value to _read_RS_metadata. It is expected that this is passed directly from the source parameter of sprit.fetch_data() + + Returns + ------- + params : dict + Modified input dictionary with additional key:value pair containing paz dictionary (key = "paz") + """ + + invPath = params['metapath'] + raspShakeInstNameList = ['raspberry shake', 'shake', 'raspberry', 'rs', 'rs3d', 'rasp. shake', 'raspshake'] + trominoNameList = ['tromino', 'trom', 'trm', 't'] + if params['instrument'].lower() in raspShakeInstNameList: + if update_metadata: + params = _update_shake_metadata(filepath=invPath, params=params, write_path=write_path) + params = _read_RS_Metadata(params, source=source) + elif params['instrument'].lower() in trominoNameList: + params['paz'] = {'Z':{}, 'E':{}, 'N':{}} + #THESE VALUES ARE PLACEHOLDERS, taken from RASPBERRY SHAKE! (Needed for PPSDs) + #params['paz']['Z'] = {'sensitivity': 360000000.0, + # 'gain': 360000000.0, + # 'poles': [(-1+0j), (-3.03+0j), (-3.03+0j), (-666.67+0j)], + # 'zeros': [0j, 0j, 0j]} + #params['paz']['E'] = params['paz']['Z'] + #params['paz']['N'] = params['paz']['Z'] + + # Initially started here: https://ds.iris.edu/NRL/sensors/Sunfull/RESP.XX.NS721..BHZ.PS-4.5C1_LF4.5_RC3400_RSNone_SG82_STgroundVel + tromino_paz = { 'zeros': [-3.141592653589793/2-0j, -3.141592653589793/2-0j], + 'poles': [(17-24j), (17+24j)], + 'stage_gain':100, + 'stage_gain_frequency':10, + 'normalization_frequency':5, + 'normalization_factor':1} + + params['paz']['Z'] = params['paz']['E'] = params['paz']['N'] = tromino_paz + + tromChaResponse = obspy.core.inventory.response.Response().from_paz(**tromino_paz) + + obspyStartDate = obspy.UTCDateTime(1900,1,1) + obspyNow = obspy.UTCDateTime.now() + + # Update location code to match station + if type(params['station']) is int or str(params['station']).isdigit(): + params['loc'] = str(params['station']) + + # Create channel objects to be used in inventory + channelObj_Z = obspy.core.inventory.channel.Channel(code='EHZ', location_code=params['loc'], latitude=params['params']['latitude'], + longitude=params['params']['longitude'], elevation=params['params']['elevation'], depth=params['params']['depth'], + azimuth=0, dip=90, start_date=obspyStartDate, end_date=obspyNow, response=tromChaResponse) + channelObj_E = obspy.core.inventory.channel.Channel(code='EHE', location_code=params['loc'], latitude=params['params']['latitude'], + longitude=params['params']['longitude'], elevation=params['params']['elevation'], depth=params['params']['depth'], + azimuth=90, dip=0, start_date=obspyStartDate, end_date=obspyNow, response=tromChaResponse) + channelObj_N = obspy.core.inventory.channel.Channel(code='EHN', location_code=params['loc'], latitude=params['params']['latitude'], + longitude=params['params']['longitude'], elevation=params['params']['elevation'],
depth=params['params']['depth'], + azimuth=0, dip=0, start_date=obspyStartDate, end_date=obspyNow, response=tromChaResponse) + + # Create site object for inventory + siteObj = obspy.core.inventory.util.Site(name=params['params']['site'], description=None, town=None, county=None, region=None, country=None) + + # Create station object for inventory + stationObj = obspy.core.inventory.station.Station(code='TRMNO', latitude=params['params']['latitude'], longitude=params['params']['longitude'], + elevation=params['params']['elevation'], channels=[channelObj_Z, channelObj_E, channelObj_N], site=siteObj, + vault=None, geology=None, equipments=None, operators=None, creation_date=obspyStartDate, + termination_date=obspy.UTCDateTime(2100,1,1), total_number_of_channels=3, + selected_number_of_channels=3, description='Estimated data for Tromino, this is NOT from the manufacturer', + comments=None, start_date=obspyStartDate, end_date=obspyNow, + restricted_status=None, alternate_code=None, historical_code=None, + data_availability=obspy.core.inventory.util.DataAvailability(obspyStartDate, obspy.UTCDateTime.now()), + identifiers=None, water_level=None, source_id=None) + + # Create network object for inventory + network = [obspy.core.inventory.network.Network(code='AM', stations=[stationObj], total_number_of_stations=None, + selected_number_of_stations=None, description=None, comments=None, start_date=obspyStartDate, + end_date=obspyNow, restricted_status=None, alternate_code=None, historical_code=None, + data_availability=None, identifiers=None, operators=None, source_id=None)] + + params['inv'] = obspy.Inventory(networks=network) + else: + if not invPath: + pass #if invPath is None + elif not pathlib.Path(invPath).exists() or invPath == '': + warnings.warn(f"The metapath parameter was not specified correctly ({params['metapath']} does not exist). Metadata may not be read correctly.") + readInvKwargs = {} + argspecs = inspect.getfullargspec(obspy.read_inventory) + for argName in argspecs[0]: + if argName in read_inventory_kwargs.keys(): + readInvKwargs[argName] = read_inventory_kwargs[argName] + + readInvKwargs['path_or_file_object'] = invPath + params['inv'] = obspy.read_inventory(**readInvKwargs) # Pass through any supported read_inventory kwargs + if 'params' in params.keys(): + params['params']['inv'] = params['inv'] + + return params
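+ + +# Illustrative usage sketch for get_metadata() (added for documentation; not part of the original module). Assumes params was created by input_params() and fetch_data(), so the metapath and instrument entries used above are present; functions are assumed exposed at the package level. +def _example_get_metadata(params): + import sprit + # For Raspberry Shake data, update_metadata=True updates the inventory before reading; + # the paz dictionary and inventory are attached to the returned object + params = sprit.get_metadata(params, update_metadata=True, write_path='') + return params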
+ + + +# Get report (report generation and export) +
+[docs] +def get_report(hvsr_results, report_formats=['print', 'table', 'plot', 'html', 'pdf'], azimuth='HV', + plot_type='HVSR p ann C+ p ann Spec p ann', plot_engine='matplotlib', + show_print_report=True, show_table_report=False, show_plot_report=True, show_html_report=False, show_pdf_report=True, + suppress_report_outputs=False, show_report_outputs=False, + csv_handling='append', + report_export_format=None, report_export_path=None, + verbose=False, **kwargs): + """Generate and/or print and/or export a report of the HVSR analysis in a variety of formats. + + Formats include: + * 'print': A (monospace) text summary of the HVSR results + * 'table': A pandas.DataFrame summary of the HVSR Results. + This is useful for copy/pasting directly into a larger worksheet. + * 'plot': A plot summary of the HVSR results, generated using the plot_hvsr() function. + * 'html': An HTML document/text of the HVSR results. This includes the table, print, and plot reports in one document. + * 'pdf': A PDF document showing the summary of the HVSR Results. + The PDF report is simply the HTML report saved to an A4-sized PDF document. + + + Parameters + ---------- + hvsr_results : dict + Dictionary containing all the information about the processed hvsr data + report_formats : {'print', 'table', 'plot', 'html', 'pdf'} + Format(s) in which to print or export the report. + Each report format saves its output to the following attribute: + - 'print': hvsr_results['Print_Report'] - a str + - 'plot': hvsr_results['HV_Plot'] - matplotlib.Figure object + - 'table': hvsr_results['Table_Report'] - pandas.DataFrame object + - 'html': hvsr_results['HTML_Report'] - a string containing the text for an HTML document + - 'pdf': currently does not save to the HVSRData object itself, can only be saved to the disk directly + - list/tuple - a list or tuple of the above objects, in the same order they are in the report_formats list + plot_type : str, default = 'HVSR p ann C+ p ann Spec p ann' + What type of plot to plot, if 'plot' part of report_formats input + azimuth : str, default = 'HV' + Which azimuth to plot, by default "HV" which is the main "azimuth" combining the E and N components + csv_handling : str, {'append', 'overwrite', 'keep/rename'} + How to handle table report outputs if the designated csv output file already exists. By default, appends the new information to the end of the existing file. + suppress_report_outputs : bool, default=False + If True, only reads output to appropriate attribute of data class (ie, print does not print, only reads text into variable). If False, performs as normal. + report_export_format : list or str, default=None + A string or list of strings indicating which report formats should be exported to disk. + report_export_path : None, bool, or filepath, default = None + If None or False, does not export; if True, will export to same directory as the input_data parameter in the input_params() function. + Otherwise, it should be a string or path object indicating where to export results. May be a file or directory. + If a directory is specified, the filename will be "<site_name>_<acq_date>_<UTC start time>-<UTC end time>". + The extension/suffix defaults to png for report_formats="plot", csv for 'table', txt for 'print', html for 'html', and pdf for 'pdf.' + verbose : bool, default=False + Whether to print the results to terminal.
This is the same output as report_formats='print', and will not repeat if that is already selected + + Returns + ------- + sprit.HVSRData + """ + orig_args = locals().copy() #Get the initial arguments + orig_args['report_formats'] = [str(f).lower() for f in orig_args['report_formats']] + update_msg = [] + + # Update with processing parameters specified previously in input_params, if applicable + if 'processing_parameters' in hvsr_results.keys(): + if 'get_report' in hvsr_results['processing_parameters'].keys(): + for k, v in hvsr_results['processing_parameters']['get_report'].items(): + defaultVDict = dict(zip(inspect.getfullargspec(get_report).args[1:], + inspect.getfullargspec(get_report).defaults)) + defaultVDict['kwargs'] = {} + # Manual input to function overrides the imported parameter values + if (not isinstance(v, (HVSRData, HVSRBatch))) and (k in orig_args.keys()) and (orig_args[k]==defaultVDict[k]): + update_msg.append(f'\t\t{k} = {v} (previously {orig_args[k]})') + orig_args[k] = v + + report_formats = orig_args['report_formats'] + azimuth = orig_args['azimuth'] + plot_type = orig_args['plot_type'] + plot_engine = orig_args['plot_engine'] + show_print_report = orig_args['show_print_report'] + show_table_report = orig_args['show_table_report'] + show_plot_report = orig_args['show_plot_report'] + show_html_report = orig_args['show_html_report'] + show_pdf_report = orig_args['show_pdf_report'] + suppress_report_outputs = orig_args['suppress_report_outputs'] + show_report_outputs = orig_args['show_report_outputs'] + report_export_format = orig_args['report_export_format'] + report_export_path = orig_args['report_export_path'] + csv_handling = orig_args['csv_handling'] + suppress_report_outputs = orig_args['suppress_report_outputs'] + verbose = orig_args['verbose'] + kwargs = orig_args['kwargs'] + + # Put Processing parameters in hvsr_results immediately (gets used later local function in get_report) + hvsr_results['processing_parameters']['get_report'] = {} + for key, value in orig_args.items(): + hvsr_results['processing_parameters']['get_report'][key] = value + + if verbose: + print('\nGetting HVSR Report: get_report()') + print('\tUsing the following parameters:') + for key, value in orig_args.items(): + if key == 'params': + pass + else: + print('\t {}={}'.format(key, value)) + print() + + if update_msg != [] and verbose: + update_msg.insert(0, '\tThe following parameters were updated using the processing_parameters attribute:') + for msg_line in update_msg: + print(msg_line) + + if isinstance(hvsr_results, HVSRBatch): + if verbose: + print('\nGetting Reports: Running in batch mode') + + print('\tUsing parameters:') + for key, value in orig_args.items(): + print(f'\t {key}={value}') + print() + + #If running batch, we'll loop through each site + for site_name in hvsr_results.keys(): + args = orig_args.copy() #Make a copy so we don't accidentally overwrite + individual_params = hvsr_results[site_name] #Get what would normally be the "params" variable for each site + args['hvsr_results'] = individual_params #reset the params parameter we originally read in to an individual site params + if hvsr_results[site_name]['ProcessingStatus']['OverallStatus']: + try: + hvsr_results[site_name] = __get_report_batch(**args) #Call another function, that lets us run this function again + except: + hvsr_results[site_name] = hvsr_results[site_name] + else: + hvsr_results[site_name] = hvsr_results[site_name] + + combined_csvReport = pd.DataFrame() + for site_name in hvsr_results.keys(): + if 
'Table_Report' in hvsr_results[site_name].keys(): + combined_csvReport = pd.concat([combined_csvReport, hvsr_results[site_name]['Table_Report']], ignore_index=True, join='inner') + + if report_export_path is not None: + if report_export_path is True: + if pathlib.Path(hvsr_results['input_params']['input_data']) in sampleFileKeyMap.values(): + csvExportPath = pathlib.Path(os.getcwd()) + else: + csvExportPath = pathlib.Path(hvsr_results['input_params']['input_data']) + elif pathlib.Path(report_export_path).is_dir(): + csvExportPath = pathlib.Path(report_export_path) + elif pathlib.Path(report_export_path).is_file(): + csvExportPath = pathlib.Path(report_export_path).parent + else: + csvExportPath = pathlib.Path(hvsr_results[site_name].input_data) + if csvExportPath.is_dir(): + pass + else: + csvExportPath = csvExportPath.parent + + combined_csvReport.to_csv(csvExportPath, index=False) + return hvsr_results + + if suppress_report_outputs: + show_print_report = show_plot_report = show_table_report = show_html_report = show_pdf_report = False + elif show_report_outputs: + show_print_report = show_plot_report = show_table_report = show_html_report = show_pdf_report = True + #if 'BestPeak' in hvsr_results.keys() and 'PassList' in hvsr_results['BestPeak'].keys(): + try: + curvTestsPassed = (hvsr_results['BestPeak'][azimuth]['PassList']['WinLen'] + + hvsr_results['BestPeak'][azimuth]['PassList']['SigCycles']+ + hvsr_results['BestPeak'][azimuth]['PassList']['LowCurveStD']) + curvePass = curvTestsPassed > 2 + + #Peak Pass? + peakTestsPassed = ( hvsr_results['BestPeak'][azimuth]['PassList']['ProminenceLow'] + + hvsr_results['BestPeak'][azimuth]['PassList']['ProminenceHi']+ + hvsr_results['BestPeak'][azimuth]['PassList']['AmpClarity']+ + hvsr_results['BestPeak'][azimuth]['PassList']['FreqStability']+ + hvsr_results['BestPeak'][azimuth]['PassList']['LowStDev_Freq']+ + hvsr_results['BestPeak'][azimuth]['PassList']['LowStDev_Amp']) + peakPass = peakTestsPassed >= 5 + except Exception as e: + errMsg = 'No BestPeak identified. Check peak_freq_range or hvsr_band or try to remove bad noise windows using remove_noise() or change processing parameters in process_hvsr() or generate_ppsds(). Otherwise, data may not be usable for HVSR.' + print(errMsg) + print(e) + hvsr_results['Plot_Report'] = plot_hvsr(hvsr_results, plot_type='HVSR t all C+ t SPEC', azimuth=azimuth, return_fig=True) + return hvsr_results + #raise RuntimeError('No BestPeak identified. Check peak_freq_range or hvsr_band or try to remove bad noise windows using remove_noise() or change processing parameters in process_hvsr() or generate_ppsds().
Otherwise, data may not be usable for HVSR.') + + # Figure out which reports will be used, and format them correctly + if isinstance(report_formats, (list, tuple)): + pass + else: + #We will use a loop later even if it's just one report type, so reformat to prepare for for loop + allList = [':', 'all'] + if report_formats.lower() in allList: + report_formats = ['print', 'table', 'plot', 'html', 'pdf'] + else: + report_formats = [report_formats] + + # Format the export formats correctly + if isinstance(report_export_format, (list, tuple)): + pass + elif report_export_format is None: + pass + else: + # We will use list methods later even if it's just one report type, so reformat as list + allList = [':', 'all'] + if report_export_format.lower() in allList: + report_export_format = ['print', 'table', 'plot', 'html', 'pdf'] + else: + report_export_format = [report_export_format] + + for i, rep_form in enumerate(report_formats): + if isinstance(report_export_path, (list, tuple)): + if not isinstance(report_formats, (list, tuple)): + warnings.warn('report_export_path is a list/tuple and report_formats is not. This may result in unexpected behavior.') + if isinstance(report_formats, (list, tuple)) and isinstance(report_export_path, (list, tuple)) and len(report_formats) != len(report_export_path): + warnings.warn('report_export_path and report_formats are both lists or tuples, but they are not the same length. This may result in unexpected behavior.') + exp_path = report_export_path[i] + else: + exp_path = report_export_path + + if report_export_format is None: + report_export_format = '' + + # Print_Report + if rep_form == 'print': + verbose_print = verbose + if show_print_report: + verbose_print = True + + # Generates print report and saves to hvsr_results["Print_Report"] + hvsr_results = _generate_print_report(hvsr_results, + azimuth = azimuth, + show_print_report = True, verbose=verbose_print) + + if 'print' in report_export_format: + if exp_path is None: + print_exp_path = exp_path + else: + print_exp_path = pathlib.Path(exp_path).with_suffix('.txt') + + export_report(hvsr_results, azimuth=azimuth, + report_export_format='print', report_export_path=print_exp_path, + show_report = False, # If report is to be shown, done in previous step + verbose = verbose_print) + + # Table_Report + elif rep_form == 'table': + verbose_table = verbose + if show_table_report: + verbose_table = True + + hvsr_results = _generate_table_report(hvsr_results, + azimuth=azimuth, + show_table_report=show_table_report, + verbose=verbose_table) + + if 'table' in report_export_format: + if exp_path is None: + table_exp_path = exp_path + else: + table_exp_path = pathlib.Path(exp_path).with_suffix('.csv') + + export_report(hvsr_results, azimuth=azimuth, + report_export_format='table', report_export_path=table_exp_path, + csv_handling=csv_handling, + show_report = False, # If report is to be shown, done in previous step + verbose = verbose_table) + + # HV_Plot + elif rep_form == 'plot': + plot_hvsr_kwargs = {k: v for k, v in kwargs.items() if k in tuple(inspect.signature(plot_hvsr).parameters.keys())} + if 'plot_type' in plot_hvsr_kwargs.keys(): + plot_hvsr_kwargs.pop('plot_type') + if 'plot_engine' in plot_hvsr_kwargs.keys(): + plot_hvsr_kwargs.pop('plot_engine') + + fig = plot_hvsr(hvsr_results, plot_type=plot_type, plot_engine=plot_engine, show_plot=show_plot_report, return_fig=True) + + expFigAx = fig # Same figure object is exported regardless of plot engine + + if 'plot' in report_export_format:
+ export_report(hvsr_results=hvsr_results, report_export_path=report_export_path, report_export_format='plot') + hvsr_results['BestPeak'][azimuth]['Report']['HV_Plot'] = hvsr_results['HV_Plot'] = fig + + if show_plot_report: + print('\nPlot of data report:') + + if not verbose: + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + fig.show() + else: + fig.show() + else: + if verbose: + print("\n\tPlot of data report created and saved in ['HV_Plot'] attribute") + + # HTML_Report + elif rep_form == 'html': + verbose_html = verbose + if verbose or show_html_report: + verbose_html = True + hvsr_results = _generate_html_report(hvsr_results, show_html_report=show_html_report, verbose=verbose_html) + + if 'html' in report_export_format: + if exp_path is None: + html_exp_path = exp_path + else: + html_exp_path = pathlib.Path(exp_path).with_suffix('.html') + + export_report(hvsr_results, azimuth=azimuth, + report_export_format='html', report_export_path=html_exp_path, + show_report = False, # If report is to be shown, done in previous step + verbose = verbose_html) + + # PDF_Report + elif rep_form == 'pdf': + verbose_pdf = verbose + if verbose or show_pdf_report: + verbose_pdf = True + + # Don't repeat html printing, etc. if already done + if 'html' in report_formats: + show_html_report = False + + if exp_path is None: + pdf_exp_path = exp_path + else: + pdf_exp_path = pathlib.Path(exp_path) + hvsr_results = _generate_pdf_report(hvsr_results, pdf_report_filepath=pdf_exp_path, + show_pdf_report=show_pdf_report, show_html_report=show_html_report, verbose=verbose_pdf) + + + return hvsr_results
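+ + +# Illustrative usage sketch for get_report() (added for documentation; not part of the original module). Assumes hvsr_results has been through the full processing chain (e.g., process_hvsr()/check_peaks()); the export path is a hypothetical placeholder. +def _example_get_report(hvsr_results): + import sprit + # Generate print and table reports, exporting the table report to a csv file + hvsr_results = sprit.get_report(hvsr_results, report_formats=['print', 'table'], report_export_format='table', report_export_path='/path/to/report.csv', csv_handling='append') + return hvsr_results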
+ + + +# Import data +
+[docs] +def import_data(import_filepath, data_format='pickle'): + """Function to import .hvsr (or other extension) data exported using export_data() function + + Parameters + ---------- + import_filepath : str or path object + Filepath of file created using export_data() function. This is usually a pickle file with a .hvsr extension + data_format : str, default='pickle' + Type of format data is in. Currently, only 'pickle' supported. Eventually, json or other type may be supported, by default 'pickle'. + + Returns + ------- + HVSRData or HVSRBatch object + """ + if data_format=='pickle': + with open(import_filepath, 'rb') as f: + dataIN = pickle.load(f) + else: + dataIN = import_filepath + return dataIN
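+ + +# Illustrative usage sketch for import_data() (added for documentation; not part of the original module). The .hvsr file is a hypothetical placeholder previously created with the export_data() function referenced in the docstring above. +def _example_import_data(): + import sprit + hv = sprit.import_data('/path/to/SiteName.hvsr', data_format='pickle') + return hv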
+ + + +# Import settings +
+[docs] +def import_settings(settings_import_path, settings_import_type='instrument', verbose=False): + """Function to import settings, intended for use with settings files saved by export_settings() + + Parameters + ---------- + settings_import_path : str or pathlib.Path object + Filepath of a settings (json) file, or a directory/list of settings files if settings_import_type indicates that all settings should be imported + settings_import_type : str, default='instrument' + Type of settings to import ('instrument' or 'processing'), or 'all' (or equivalent) to import both + verbose : bool, default=False + Whether to print information to terminal + + Returns + ------- + dict + Dictionary containing the imported settings + """ + settingsDict = {} + allList = ['all', ':', 'both', 'any'] + if settings_import_type.lower() not in allList: + # if just a single settings dict is desired + with open(settings_import_path, 'r') as f: + settingsDict = json.load(f) + else: + # Either a directory or list + # NOTE: import of multiple settings files at once is not currently implemented + if isinstance(settings_import_path, (list, tuple)): + for setPath in settings_import_path: + pass + else: + settings_import_path = sprit_utils.checkifpath(settings_import_path) + if not settings_import_path.is_dir(): + raise RuntimeError(f'settings_import_type={settings_import_type}, but settings_import_path is not list/tuple or filepath to directory') + else: + instFile = settings_import_path.glob('*.inst') + procFile = settings_import_path.glob('*.proc') + return settingsDict
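+ + +# Illustrative usage sketch for import_settings() (added for documentation; not part of the original module). File names are hypothetical placeholders; the .inst/.proc extensions follow the conventions referenced in the function body and in input_params() below. +def _example_import_settings(): + import sprit + inst_settings = sprit.import_settings('/path/to/myshake.inst', settings_import_type='instrument') + proc_settings = sprit.import_settings('/path/to/mysite.proc', settings_import_type='processing') + return inst_settings, proc_settings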
+ + + +# Define input parameters +
+[docs] +def input_params(input_data, + site='HVSR Site', + id_prefix=None, + network='AM', + station='RAC84', + loc='00', + channels=['EHZ', 'EHN', 'EHE'], + acq_date=str(datetime.datetime.now().date()), + starttime = obspy.UTCDateTime(NOWTIME.year, NOWTIME.month, NOWTIME.day, 0, 0, 0, 0), + endtime = obspy.UTCDateTime(NOWTIME.year, NOWTIME.month, NOWTIME.day, 23, 59, 59, 999999), + tzone = 'UTC', + xcoord = -88.2290526, + ycoord = 40.1012122, + elevation = 755, + input_crs = None, #'EPSG:4326',#4269 is NAD83, defaults to WGS (4326) + output_crs = None, + elev_unit = 'meters', + depth = 0, + instrument = 'Raspberry Shake', + metapath = None, + hvsr_band = [0.4, 40], + peak_freq_range=[0.4, 40], + processing_parameters={}, + verbose=False + ): + """Function for designating input parameters for reading in and processing data + + Parameters + ---------- + input_data : str or pathlib.Path object + Filepath of data. This can be a directory or file, but will need to match with what is chosen later as the source parameter in fetch_data() + site : str, default="HVSR Site" + Site name as designated by user for ease of reference. Used for plotting titles, filenames, etc. + id_prefix : str, default=None + A prefix that may be used to create unique identifiers for each site. + The identifier created is saved as the ['hvsr_id'] attribute of the HVSRData object, + and is equivalent to the following formatted string: + f"{id_prefix}-{acq_date.strftime("%Y%m%d")}-{starttime.strftime("%H%M")}-{station}". + network : str, default='AM' + The network designation of the seismometer. This is necessary for data from Raspberry Shakes. 'AM' is for Amateur network, which fits Raspberry Shakes. + station : str, default='RAC84' + The station name of the seismometer. This is necessary for data from Raspberry Shakes. + loc : str, default='00' + Location information of the seismometer. + channels : list, default=['EHZ', 'EHN', 'EHE'] + The three channels used in this analysis, as a list of strings. Preferred that Z component is first, but not necessary + acq_date : str, int, date object, or datetime object + If string, preferred format is 'YYYY-MM-DD'. + If int, this will be interpreted as the day of year of the current year (e.g., 33 would be Feb 2 of the current year) + If date or datetime object, this will be the date. Make sure to account for time change when converting to UTC (if UTC is the following day, use the UTC day). + starttime : str, time object, or datetime object, default='00:00:00.00' + Start time of data stream. This is necessary for Raspberry Shake data in 'raw' form, or for trimming data. Format can be either 'HH:MM:SS.micros' or 'HH:MM' at minimum. + endtime : str, time object, or datetime object, default='23:59:59.999999' + End time of data stream. This is necessary for Raspberry Shake data in 'raw' form, or for trimming data. Same format as starttime. + tzone : str or int, default = 'UTC' + Timezone of input data. If string, 'UTC' will use the time as input directly. Any other string value needs to be a TZ identifier in the IANA database, a wikipedia page of these is available here: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. + If int, should be the int value of the UTC offset (e.g., for American Eastern Standard Time: -5). + This is necessary for Raspberry Shake data in 'raw' format. + xcoord : float, default=-88.2290526 + Longitude (or easting, or, generally, X coordinate) of data point, in Coordinate Reference System (CRS) designated by input_crs.
Currently only used in table output, but will likely be used in future for mapping/profile purposes. + ycoord : float, default=40.1012122 + Latitude (or northing, or, generally, Y coordinate) of data point, in Coordinate Reference System (CRS) designated by input_crs. Currently only used in table output, but will likely be used in future for mapping/profile purposes. + input_crs : str or other format read by pyproj, default=None + Coordinate reference system of input data, as used by pyproj.CRS.from_user_input(). If None, assumed to be WGS84 ('EPSG:4326') + output_crs : str or other format read by pyproj, default=None + Coordinate reference system to which input data will be transformed, as used by pyproj.CRS.from_user_input(). If None, uses the same CRS as input_crs + elevation : float, default=755 + Surface elevation of data point. Not currently used (except in table output), but will likely be used in the future. + elev_unit : str, default='meters' + Unit of the elevation value. Values indicating feet are automatically converted to meters during processing. + depth : float, default=0 + Depth of seismometer. Not currently used, but will likely be used in the future. + instrument : str {'Raspberry Shake', "Tromino"} + Instrument from which the data was acquired. + metapath : str or pathlib.Path object, default=None + Filepath of metadata, in format supported by obspy.read_inventory. If default value of None, will read from resources folder of repository (only supported for Raspberry Shake). + hvsr_band : list, default=[0.4, 40] + Two-element list containing low and high "corner" frequencies (in Hz) for processing. This can be specified again later. + peak_freq_range : list or tuple, default=[0.4, 40] + Two-element list or tuple containing low and high frequencies (in Hz) that are used to check for HVSR Peaks. This can be a tighter range than hvsr_band, but if larger, it will still only use the hvsr_band range. + processing_parameters : dict or filepath, default={} + If filepath, should point to a .proc json file with processing parameters (i.e, an output from sprit.export_settings()). + Note that this only applies to parameters for the functions: 'fetch_data', 'remove_noise', 'generate_ppsds', 'process_hvsr', 'check_peaks', and 'get_report.' + If dictionary, dictionary containing nested dictionaries of function names as the key, and the parameter names/values as key/value pairs for each key. + If a function name is not present, or if a parameter name is not present, default values will be used. + For example: + `{ 'fetch_data' : {'source':'batch', 'data_export_path':"/path/to/trimmed/data", 'data_export_format':'mseed', 'detrend':'spline', 'plot_input_stream':True, 'verbose':False, kwargs:{'kwargskey':'kwargsvalue'}} }` + verbose : bool, default=False + Whether to print output and results to terminal + + Returns + ------- + params : sprit.HVSRData + sprit.HVSRData class containing input parameters, including data file path and metadata path. This will be used as an input to other functions. If batch processing, params will be converted to batch type in fetch_data() step.
+ + """ + orig_args = locals().copy() #Get the initial arguments + start_time = datetime.datetime.now() + + # Record any updates that are made to input_params based + update_msg = [] + + # Reformat times + if type(acq_date) is datetime.datetime: + date = str(acq_date.date()) + elif type(acq_date) is datetime.date: + date=str(acq_date) + elif type(acq_date) is str: + monthStrs = {'jan':1, 'january':1, + 'feb':2, 'february':2, + 'mar':3, 'march':3, + 'apr':4, 'april':4, + 'may':5, + 'jun':6, 'june':6, + 'jul':7, 'july':7, + 'aug':8, 'august':8, + 'sep':9, 'sept':9, 'september':9, + 'oct':10,'october':10, + 'nov':11,'november':11, + 'dec':12,'december':12} + + spelledMonth = False + for m in monthStrs.keys(): + acq_date = acq_date.lower() + if m in acq_date: + spelledMonth = True + break + + if spelledMonth is not False: + month = monthStrs[m] + + if '/' in acq_date: + sep = '/' + elif '.' in acq_date: + sep='.' + elif ' ' in acq_date: + sep = ' ' + acq_date = acq_date.replace(',', '') + else: + sep = '-' + + acq_date = acq_date.split(sep) + if len(acq_date[2]) > 2: #American format + date = '{}-{}-{}'.format(acq_date[2], acq_date[0], acq_date[1]) + else: #international format, one we're going to use + date = '{}-{}-{}'.format(acq_date[0], acq_date[1], acq_date[2]) + elif type(acq_date) is int: + year=datetime.datetime.today().year + date = str((datetime.datetime(year, 1, 1) + datetime.timedelta(acq_date - 1)).date()) + + if type(starttime) is str: + if 'T' in starttime: + #date=starttime.split('T')[0] + starttime = starttime.split('T')[1] + else: + pass + #starttime = date+'T'+starttime + elif type(starttime) is datetime.datetime: + #date = str(starttime.date()) + starttime = str(starttime.time()) + ###HERE IS NEXT + elif type(starttime) is datetime.time(): + starttime = str(starttime) + + if not isinstance(starttime, obspy.UTCDateTime): + starttime = str(date)+"T"+str(starttime) + starttime = obspy.UTCDateTime(sprit_utils.format_time(starttime, tzone=tzone)) + + if not isinstance(orig_args['starttime'], obspy.UTCDateTime) or starttime != orig_args['starttime']: + update_msg.append(f"\t\tstarttime was updated from {orig_args['starttime']} to {starttime}") + + + if type(endtime) is str: + if 'T' in endtime: + date=endtime.split('T')[0] + endtime = endtime.split('T')[1] + elif type(endtime) is datetime.datetime: + date = str(endtime.date()) + endtime = str(endtime.time()) + elif type(endtime) is datetime.time(): + endtime = str(endtime) + + if not isinstance(endtime, obspy.UTCDateTime): + endtime = str(date)+"T"+str(endtime) + endtime = obspy.UTCDateTime(sprit_utils.format_time(endtime, tzone=tzone)) + + if not isinstance(orig_args['starttime'], obspy.UTCDateTime) or starttime != orig_args['starttime']: + update_msg.append(f"\t\tendtime was updated from {orig_args['endtime']} to {endtime}") + + acq_date = datetime.date(year=int(date.split('-')[0]), month=int(date.split('-')[1]), day=int(date.split('-')[2])) + if acq_date != orig_args['acq_date']: + update_msg.append(f"\t\tacq_date was updated from {orig_args['acq_date']} to {acq_date}") + + raspShakeInstNameList = ['raspberry shake', 'shake', 'raspberry', 'rs', 'rs3d', 'rasp. 
+ + # If no CRS specified, assume WGS84 + if input_crs is None: + if verbose: + update_msg.append(f"\t\tNo value specified for input_crs, assuming WGS84 (EPSG:4326)") + input_crs = 'EPSG:4326' + + if output_crs is None: + if verbose: + update_msg.append(f"\t\tNo value specified for output_crs, using same coordinate system as input_crs (default is EPSG:4326)") + output_crs = input_crs + + # Get CRS Objects + input_crs = CRS.from_user_input(input_crs) + output_crs = CRS.from_user_input(output_crs) + wgs84_crs = CRS.from_user_input('EPSG:4326') + + # Get WGS84 coordinates (needed for inventory) + wgs84_transformer = Transformer.from_crs(input_crs, wgs84_crs, always_xy=True) + xcoord_wgs84, ycoord_wgs84 = wgs84_transformer.transform(xcoord, ycoord) + + xcoord_wgs84 = round(xcoord_wgs84, 7) + ycoord_wgs84 = round(ycoord_wgs84, 7) + + update_msg.append(f"\t\tLongitude ({xcoord_wgs84}) and Latitude ({ycoord_wgs84}) calculated for compatibility with obspy.") + + # Get coordinates in CRS specified in output_crs + coord_transformer = Transformer.from_crs(input_crs, output_crs, always_xy=True) + xcoord, ycoord = coord_transformer.transform(xcoord, ycoord) + + if isinstance(processing_parameters, dict): + pass + else: + processing_parameters = sprit_utils.checkifpath(processing_parameters) + processing_parameters = import_settings(processing_parameters, settings_import_type='processing', verbose=verbose) + + # Get elevation in meters + if str(elev_unit).lower() in ['feet', 'foot', 'ft', 'f', 'imperial', 'imp', 'american', 'us']: + elevation = elevation * 0.3048 + elev_unit = 'meters' + update_msg.append(f"\t\t Elevations are automatically converted to meters during processing") + update_msg.append(f"\t\t elevation was updated to {elevation} m (from {orig_args['elevation']} ft)") + update_msg.append(f"\t\t elev_unit was also updated to {elev_unit} (from {orig_args['elev_unit']})") + + # Create a unique identifier for each site + if id_prefix is None: + proj_id = '' + else: + proj_id = str(id_prefix)+'-' + + hvsr_id = f"{proj_id}{acq_date.strftime('%Y%m%d')}-{starttime.strftime('%H%M')}-{station}" + update_msg.append(f"\t\thvsr_id generated from input parameters: {hvsr_id}") + + #Add key/values to input parameter dictionary for use throughout the rest of the package + inputParamDict = {'site':site, 'id_prefix':id_prefix, 'hvsr_id':hvsr_id, 'network':network, 'station':station,'location':loc, 'channels':channels, + 'net':network,'sta':station, 'loc':loc, 'cha':channels, 'instrument':instrument, + 'acq_date':acq_date,'starttime':starttime,'endtime':endtime, 'timezone':'UTC', #Will be in UTC by this point + 'xcoord':xcoord, 'ycoord':ycoord, 'longitude':xcoord_wgs84,'latitude':ycoord_wgs84, + 'elevation':elevation, 'elev_unit':elev_unit, 'input_crs':input_crs, 'output_crs':output_crs, + 'depth':depth, 'input_data': input_data, 'metapath':metapath, 'hvsr_band':hvsr_band, 'peak_freq_range':peak_freq_range, + 'processing_parameters':processing_parameters, 'ProcessingStatus':{'InputParamsStatus':True, 'OverallStatus':True} + } + + #Replace any default parameter settings with those from json file of interest, potentially + instrument_settings_dict = {} + if pathlib.Path(str(instrument)).exists(): + instrument_settings = import_settings(settings_import_path=instrument, settings_import_type='instrument', verbose=verbose) + input_params_args = inspect.getfullargspec(input_params).args + input_params_args.append('net') + input_params_args.append('sta') + for k, settings_value in
instrument_settings.items(): + if k in input_params_args: + instrument_settings_dict[k] = settings_value + inputParamDict['instrument_settings'] = inputParamDict['instrument'] + inputParamDict.update(instrument_settings_dict) + + if str(instrument).lower() in raspShakeInstNameList: + if metapath is None or metapath=='': + metapath = pathlib.Path(pkg_resources.resource_filename(__name__, 'resources/rs3dv5plus_metadata.inv')).as_posix() + inputParamDict['metapath'] = metapath + + for settingName in instrument_settings_dict.keys(): + if settingName in inputParamDict.keys(): + inputParamDict[settingName] = instrument_settings_dict[settingName] + + if verbose: + print('Gathering input parameters (input_params())') + for key, value in inputParamDict.items(): + print('\t {}={}'.format(key, value)) + print() + + update_msg.insert(0, '\tThe following parameters were modified from the raw input:') + for msg_line in update_msg: + print(msg_line) + print() + + #Format everything nicely + params = sprit_utils.make_it_classy(inputParamDict) + params['ProcessingStatus']['InputParamsStatus'] = True + params = _check_processing_status(params, start_time=start_time, func_name=inspect.stack()[0][3], verbose=verbose) + return params
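+ + +# Illustrative usage sketch for input_params() (added for documentation; not part of the original module). The coordinates, dates, and file path are hypothetical placeholders. +def _example_input_params(): + import sprit + params = sprit.input_params(input_data='/path/to/data.mseed', site='Example Site', acq_date='2023-06-15', starttime='13:00', endtime='14:00', xcoord=-88.229, ycoord=40.101, input_crs='EPSG:4326', elevation=755, elev_unit='meters', instrument='Raspberry Shake', hvsr_band=[0.4, 40], verbose=True) + return params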
+ + + +# Plot Azimuth data +
+[docs] +def plot_azimuth(hvsr_data, fig=None, ax=None, show_azimuth_peaks=False, interpolate_azimuths=True, show_azimuth_grid=False, show_plot=True, **plot_azimuth_kwargs): + """Function to plot azimuths when azimuths are calculated + + Parameters + ---------- + hvsr_data : HVSRData or HVSRBatch + Data object with azimuthal HVSR curves already calculated (i.e., containing the hvsr_az attribute) + show_azimuth_peaks : bool, optional + Whether to display the peak value at each azimuth calculated on the chart, by default False + interpolate_azimuths : bool, optional + Whether to interpolate the azimuth data to get a smoother plot. + This is just for visualization, does not change underlying data. + Processing the azimuthal data itself takes a lot of time, but interpolation for visualization is fairly fast. By default True. + show_azimuth_grid : bool, optional + Whether to display the grid on the chart, by default False + + Returns + ------- + matplotlib.Figure, matplotlib.Axis + Figure and axis of resulting azimuth plot + """ + orig_args = locals().copy() #Get the initial arguments + + if isinstance(hvsr_data, HVSRBatch): + #If running batch, we'll loop through each site + for site_name in hvsr_data.keys(): + args = orig_args.copy() #Make a copy so we don't accidentally overwrite + individual_params = hvsr_data[site_name] #Get what would normally be the "params" variable for each site + args['hvsr_data'] = individual_params #reset the params parameter we originally read in to an individual site params + if hvsr_data[site_name]['ProcessingStatus']['OverallStatus']: + try: + hvsr_data['Azimuth_Fig'] = __plot_azimuth_batch(**args) #Call another function, that lets us run this function again + except: + print(f"ERROR: {site_name} will not have azimuths plotted.") + elif isinstance(hvsr_data, HVSRData): + if fig is None: + fig = plt.figure() + + hvsr_band = hvsr_data.hvsr_band + + azDataList = [] + azExtraDataList = [] + + for k in sorted(hvsr_data.hvsr_az.keys()): + currData = hvsr_data.hvsr_az[k] + azDataList.append(currData) + azExtraDataList.append(currData) + + + freq = hvsr_data.x_freqs['Z'].tolist()[1:] + a = np.deg2rad(np.array(sorted(hvsr_data.hvsr_az.keys())).astype(float)) + b = a + np.pi + + z = np.array(azDataList) + z2 = np.array(azExtraDataList) + + def interp_along_theta(orig_array, orig_ind): + newArrayList = [] + for a1 in orig_array.T: + # Resample the array along the first dimension using numpy.interp + newZ = np.interp( + np.linspace(np.pi/180, np.pi, 180), # New indices + orig_ind, # Original indices + a1) + newArrayList.append(newZ) + return np.array(newArrayList).T + + if 'plot_type' in plot_azimuth_kwargs.keys(): + if 'i' in plot_azimuth_kwargs['plot_type']: + interpolate_azimuths = True + if '-i' in plot_azimuth_kwargs['plot_type']: + interpolate_azimuths = False + + + if interpolate_azimuths: + z = interp_along_theta(z, a) + z2 = interp_along_theta(z2, a) + + a = np.linspace(np.deg2rad(1), np.pi, 180) + b = (a + np.pi).tolist() + a = a.tolist() + + r, th = np.meshgrid(freq, a) + r2, th2 = np.meshgrid(freq, b) + + # Set up plot + if ax is None: + ax = plt.subplot(polar=True) + plt.title(hvsr_data['site']) + + else: + plt.sca(ax) + + plt.semilogy() + ax.set_theta_zero_location("N") + ax.set_theta_direction(-1) + plt.xlim([0, np.pi*2]) + plt.ylim([hvsr_band[1], hvsr_band[0]]) + + # Plot data + pmesh1 = plt.pcolormesh(th, r, z, cmap = 'jet') + pmesh2 = plt.pcolormesh(th2, r2, z2, cmap = 'jet') + + azList = ['azimuth', 'az', 'a', 'radial', 'r'] + azOpts = [] + if 'plot_type' in
plot_azimuth_kwargs.keys(): + if type(plot_azimuth_kwargs['plot_type']) is str: + ptList = plot_azimuth_kwargs['plot_type'].split(' ') + elif isinstance(plot_azimuth_kwargs['plot_type'], (list, tuple)): + ptList = list(plot_azimuth_kwargs['plot_type']) + + for az in azList: + if az in ptList: + azOpts = [item.lower() for item in ptList[ptList.index(az)+1:]] + + if 'p' in azOpts: + show_azimuth_peaks = True + + if 'g' in azOpts: + show_azimuth_grid = True + + if show_azimuth_peaks: + peakVals = [] + peakThetas = [] + for k in sorted(hvsr_data.hvsr_az.keys()): + peakVals.append(hvsr_data.BestPeak[k]['f0']) + peakThetas.append(int(k)) + peakThetas = peakThetas + (180 + np.array(peakThetas)).tolist() + peakThetas = np.deg2rad(peakThetas).tolist() + peakVals = peakVals + peakVals + peakVals.append(peakVals[0]) + peakThetas.append(peakThetas[0]+(np.pi*2)) + peakThetas.append(peakThetas[1]+(np.pi*2)) + + peakThetas = (np.convolve(peakThetas, np.ones(2), 'full')/2).tolist()[1:-1] + newThetas = [] + newVals = [] + for i, p in enumerate(peakThetas): + newThetas.append(p) + newThetas.append(p) + if i == 0: + newVals.append(peakVals[-1]) + newVals.append(peakVals[-1]) + else: + newVals.append(peakVals[i]) + newVals.append(peakVals[i]) + + newThetas.insert(0, newThetas[-1]) + newThetas.pop() + + newVals.append(newVals[0]) + newThetas.append(newThetas[0]) + + #peakThetas = newThetas + #peakVals = newVals + if len(peakThetas) >= 20: + alphaVal = 0.2 + else: + alphaVal = 0.9 - (19/28) + plt.scatter(peakThetas, peakVals, marker='h', facecolors='none', edgecolors='k', alpha=alphaVal) + #plt.plot(a, r, ls='none', color = 'k') + + if show_azimuth_grid: + plt.grid(visible=show_azimuth_grid, which='both', alpha=0.5) + plt.grid(visible=show_azimuth_grid, which='major', c='k', linewidth=1, alpha=1) + #plt.colorbar(pmesh1) + if show_plot: + plt.show() + + hvsr_data['AzimuthFig'] = fig + else: + warnings.warn(f'hvsr_data must be of type HVSRData or HVSRBatch, not {type(hvsr_data)}') + return fig, ax
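+ + +# Illustrative usage sketch for plot_azimuth() (added for documentation; not part of the original module). Assumes hvsr_data already contains azimuthal results (the hvsr_az attribute used above). +def _example_plot_azimuth(hvsr_data): + import sprit + fig, ax = sprit.plot_azimuth(hvsr_data, show_azimuth_peaks=True, show_azimuth_grid=True, interpolate_azimuths=True) + return fig, ax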
+ + + +# Main function for plotting results +
+[docs] +def plot_hvsr(hvsr_data, plot_type='HVSR ann p C+ ann p SPEC ann p', azimuth='HV', use_subplots=True, fig=None, ax=None, return_fig=False, plot_engine='matplotlib', save_dir=None, save_suffix='', show_legend=False, show_plot=True, close_figs=False, clear_fig=True,**kwargs): + """Function to plot HVSR data + + Parameters + ---------- + hvsr_data : dict + Dictionary containing output from process_hvsr function + plot_type : str or list, default = 'HVSR ann p C+ ann p SPEC ann p' + The type of plot(s) to create. If list, will plot all plots listed + - 'HVSR' - Standard HVSR plot, including standard deviation. Options are included below: + - 'p' shows a vertical dotted line at frequency of the "best" peak + - 'ann' annotates the frequency value of the "best" peak + - 'all' shows all the peaks identified in check_peaks() (by default, only the max is identified) + - 't' shows the H/V curve for all time windows + - 'tp' shows all the peaks from the H/V curves of all the time windows + - 'fr' shows the window within which SpRIT will search for peak frequencies, as set by peak_freq_range + - 'test' shows a visualization of the results of the peak validity test(s). Examples: + - 'tests' visualizes the results of all the peak tests (not the curve tests) + - 'test12' shows the results of tests 1 and 2. + - Append any number 1-6 after 'test' to show a specific test result visualized + - 'COMP' - plot of the PPSD curves for each individual component ("C" also works) + - '+' (as a suffix in 'C+' or 'COMP+') plots C on a plot separate from HVSR (C+ is default, but without + will plot on the same plot as HVSR) + - 'p' shows a vertical dotted line at frequency of the "best" peak + - 'ann' annotates the frequency value of the "best" peak + - 'all' shows all the peaks identified in check_peaks() (by default, only the max is identified) + - 't' shows the H/V curve for all time windows + - 'SPEC' - spectrogram style plot of the H/V curve over time + - 'p' shows a horizontal dotted line at the frequency of the "best" peak + - 'ann' annotates the frequency value of the "best" peak + - 'all' shows all the peaks identified in check_peaks() + - 'tp' shows all the peaks of the H/V curve at all time windows + - 'AZ' - circular plot of calculated azimuthal HV curves, similar in style to SPEC plot. + - 'p' shows a point at each calculated (not interpolated) azimuth peak + - 'g' shows grid lines at various angles + - 'i' interpolates so that there is an interpolated azimuth at each degree interval (1 degree step) + This is the default, so usually 'i' is not needed. + - '-i' prohibits interpolation (only shows the calculated azimuths, as determined by azimuth_angle (default = 30)) + azimuth : str, default = 'HV' + What 'azimuth' to plot, default being standard N E components combined + use_subplots : bool, default = True + Whether to output the plots as subplots (True) or as separate plots (False) + fig : matplotlib.Figure, default = None + If not None, matplotlib figure on which plot is plotted + ax : matplotlib.Axis, default = None + If not None, matplotlib axis on which plot is plotted + return_fig : bool + Whether to return figure and axis objects + plot_engine : str, default='matplotlib' + Which engine to use for plotting. Both "matplotlib" and "plotly" are acceptable. For shorthand, 'mpl', 'm' also work for matplotlib; 'plty' or 'p' also work for plotly. Not case sensitive.
+ save_dir : str or None + Directory in which to save figures + save_suffix : str + Suffix to add to end of figure filename(s), if save_dir is used + show_legend : bool, default=False + Whether to show legend in plot + show_plot : bool + Whether to show plot + close_figs : bool, default=False + Whether to close figures before plotting + clear_fig : bool, default=True + Whether to clear figures before plotting + **kwargs : keyword arguments + Keyword arguments for matplotlib.pyplot + + Returns + ------- + fig, ax : matplotlib figure and axis objects + Returns figure and axis matplotlib.pyplot objects if return_fig=True, otherwise, simply plots the figures + """ + orig_args = locals().copy() #Get the initial arguments + if isinstance(hvsr_data, HVSRBatch): + #If running batch, we'll loop through each site + for site_name in hvsr_data.keys(): + args = orig_args.copy() #Make a copy so we don't accidentally overwrite + individual_params = hvsr_data[site_name] #Get what would normally be the "hvsr_data" variable for each site + args['hvsr_data'] = individual_params #reset the hvsr_data parameter we originally read in to an individual site's data + if hvsr_data[site_name]['ProcessingStatus']['OverallStatus']: + try: + __hvsr_plot_batch(**args) #Call another function, that lets us run this function again + except: + print(f"{site_name} not able to be plotted.") + + return + + mplList = ['matplotlib', 'mpl', 'm'] + plotlyList = ['plotly', 'plty', 'p'] + + if plot_engine.lower() in plotlyList: + plotlyFigure = sprit_plot.plot_results(hvsr_data, plot_string=plot_type, results_fig=fig, return_fig=return_fig, show_results_plot=show_plot) + if return_fig: + return plotlyFigure + else: #plot_engine.lower() in mplList or any other value not in plotly list + if clear_fig and fig is not None and ax is not None: #Intended use for tkinter + #Clear everything + for key in ax: + ax[key].clear() + for t in fig.texts: + del t + fig.clear() + if close_figs: + plt.close('all') + + # The possible identifiers in plot_type for the different kind of plots + hvsrList = ['hvsr', 'hv', 'h'] + compList = ['c', 'comp', 'component', 'components'] + specgramList = ['spec', 'specgram', 'spectrogram'] + azList = ['azimuth', 'az', 'a', 'radial', 'r'] + + hvsrInd = np.nan + compInd = np.nan + specInd = np.nan + azInd = np.nan + + plot_type = plot_type.replace(',', '') + kList = plot_type.split(' ') + for i, k in enumerate(kList): + kList[i] = k.lower() + + # Get the plots in the right order, no matter how they were input (and ensure the right options go with the right plot) + # HVSR index + if len(set(hvsrList).intersection(kList)): + for i, hv in enumerate(hvsrList): + if hv in kList: + hvsrInd = kList.index(hv) + break + # Component index + #if len(set(compList).intersection(kList)): + for i, c in enumerate(kList): + if '+' in c and c[:-1] in compList: + compInd = kList.index(c) + break + + # Specgram index + if len(set(specgramList).intersection(kList)): + for i, sp in enumerate(specgramList): + if sp in kList: + specInd = kList.index(sp) + break + + # Azimuth index + if len(set(azList).intersection(kList)): + for i, sp in enumerate(azList): + if sp in kList: + azInd = kList.index(sp) + break + + + # Get indices for all plot type indicators + indList = [hvsrInd, compInd, specInd, azInd] + indListCopy = indList.copy() + plotTypeList = ['hvsr', 'comp', 'spec', 'az'] + + plotTypeOrder = [] + plotIndOrder = [] + + # Get lists with first and last indices of the specifiers for each plot + lastVal = 0 + while lastVal != 99: + firstInd =
np.nanargmin(indListCopy) + plotTypeOrder.append(plotTypeList[firstInd]) + plotIndOrder.append(indList[firstInd]) + lastVal = indListCopy[firstInd] + indListCopy[firstInd] = 99 #just a high number + + plotTypeOrder.pop() + plotIndOrder[-1] = len(kList) + + # Draw each plot type in the order specified, passing each its own group of plot-component tokens + for i, p in enumerate(plotTypeOrder): + pStartInd = plotIndOrder[i] + pEndInd = plotIndOrder[i+1] + plotComponents = kList[pStartInd:pEndInd] + + if use_subplots and i == 0 and fig is None and ax is None: + mosaicPlots = [] + for pto in plotTypeOrder: + if pto == 'az': + for j, subp in enumerate(mosaicPlots): #j, so the outer loop variable i is not shadowed + if (subp[0].lower() == 'hvsr' or subp[0].lower() == 'comp') and len([item for item in plotTypeOrder if item != "hvsr"]) > 0: + mosaicPlots[j].append(subp[0]) + mosaicPlots[j].append(subp[0]) + else: + mosaicPlots[j].append(subp[0]) + mosaicPlots[j].append(pto) + else: + mosaicPlots.append([pto]) + perSubPDict = {} + if 'az' in plotTypeOrder: + perSubPDict['az'] = {'projection':'polar'} + fig, ax = plt.subplot_mosaic(mosaicPlots, per_subplot_kw=perSubPDict, layout='constrained') + axis = ax[p] + elif use_subplots: + with warnings.catch_warnings(): + warnings.simplefilter("ignore") #Often warns about xlim when it is not an issue + if hasattr(ax, '__len__'): + ax[p].clear() + axis = ax[p] + else: + fig, axis = plt.subplots() + + if p == 'hvsr': + kwargs['subplot'] = p + fig, ax[p] = _plot_hvsr(hvsr_data, fig=fig, ax=axis, plot_type=plotComponents, azimuth=azimuth, xtype='x_freqs', show_legend=show_legend, axes=ax, **kwargs) + elif p == 'comp': + plotComponents[0] = plotComponents[0][:-1] #Strip the trailing '+' from the component identifier + kwargs['subplot'] = p + fig, ax[p] = _plot_hvsr(hvsr_data, fig=fig, ax=axis, plot_type=plotComponents, azimuth=azimuth, xtype='x_freqs', show_legend=show_legend, axes=ax, **kwargs) + elif p == 'spec': + plottypeKwargs = {} + for c in plotComponents: + plottypeKwargs[c] = True + kwargs.update(plottypeKwargs) + _plot_specgram_hvsr(hvsr_data, fig=fig, ax=axis, azimuth=azimuth, colorbar=False, **kwargs) + elif p == 'az': + kwargs['plot_type'] = plotComponents + hvsr_data['Azimuth_fig'] = plot_azimuth(hvsr_data, fig=fig, ax=axis, **kwargs) + else: + warnings.warn(f'Plot type {p} not recognized', UserWarning) #f-string, so the unrecognized type is printed + + windowsUsedStr = f"{hvsr_data['hvsr_windows_df']['Use'].astype(bool).sum()}/{hvsr_data['hvsr_windows_df'].shape[0]} windows used" + fig.text(x=1, y=0.0, s=windowsUsedStr, ha='right', va='bottom', fontsize='xx-small', + bbox=dict(facecolor='w', edgecolor=None, linewidth=0, alpha=1, pad=-1)) + + matplotlib.rcParams["figure.constrained_layout.h_pad"] = 0.075 + if show_plot: + fig.canvas.draw() + + if return_fig: + return fig + + return
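For orientation, the plot_type parsing above can be summarized with a small standalone sketch (split_plot_string is a hypothetical helper, not part of the package): each recognized identifier ('hvsr', 'c+', 'spec', 'az', or their aliases) starts a new token group, and the tokens that follow it are treated as options for that subplot.

def split_plot_string(plot_type='HVSR p ann C+ p SPEC'):
    # Simplified mirror of the index-ordering logic in plot_hvsr()
    tokens = plot_type.replace(',', '').lower().split()
    identifiers = {'hvsr': ['hvsr', 'hv', 'h'],
                   'comp': ['c+', 'comp+', 'component+', 'components+'],
                   'spec': ['spec', 'specgram', 'spectrogram'],
                   'az': ['azimuth', 'az', 'a', 'radial', 'r']}
    starts = {}
    for kind, names in identifiers.items():
        for i, tok in enumerate(tokens):
            if tok in names:
                starts[kind] = i  # first occurrence marks the start of that group
                break
    order = sorted(starts, key=starts.get)  # plot kinds in the order they appear
    bounds = [starts[k] for k in order] + [len(tokens)]
    return {k: tokens[bounds[i]:bounds[i + 1]] for i, k in enumerate(order)}

print(split_plot_string())
# {'hvsr': ['hvsr', 'p', 'ann'], 'comp': ['c+', 'p'], 'spec': ['spec']}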
+ + + +# Plot Obspy Trace in axis using matplotlib +
+[docs] +def plot_stream(stream, params, fig=None, axes=None, show_plot=False, ylim_std=0.75, return_fig=True): + """Function to plot a stream of data with Z, E, N components using matplotlib. Similar to obspy.Stream.Plot(), but will be formatted differently and eventually more customizable. + This is also used in various functions throughout the package. + + Parameters + ---------- + stream : obspy.core.Stream.stream + Obpsy stream of data with Z, E, N componenents + params : HVSRData or HVSRBatch + Data object with parameters relevant for creating plot + fig : matplotlib.Figure, default=None + Optional: if not None, matplotlib.Figure in which to plot the resulting figure (i.e., can be plotted in existing figure) + axes : matplotlib.Axis, default=None + Optional: if not None, matplotlib.Axis in which to plot the resulting figure (i.e., can be plotted in existing axis) + show_plot : bool, default=False + Whether to do matplotlib.pylot.show(), by default False + ylim_std : float, default = 0.75 + Optional: the standard deviation of the data at which to clip the chart, by default 0.75 + return_fig : bool, default=True + Optional: whether to return the figure, by default True + + Returns + ------- + (matplotlib.Figure, matplotlib.Axes) + Tuple containing the figure and axes of the resulting plot, only returned if return_fig = True + """ + if fig is None and axes is None: + fig, axes = plt.subplot_mosaic([['Z'],['N'],['E']], sharex=True, sharey=False) + + new_stream = stream.copy() + #axis.plot(trace.times, trace.data) + + sTime = stream[0].stats.starttime + timeList = {} + mplTimes = {} + + #In case data is masked, need to split, decimate, then merge back together + if isinstance(new_stream[0].data, np.ma.masked_array): + new_stream = new_stream.split() + new_stream.decimate(10) + new_stream.merge() + + zStream = new_stream.select(component='Z')#[0] + eStream = new_stream.select(component='E')#[0] + nStream = new_stream.select(component='N')#[0] + streams = [zStream, nStream, eStream] + + for st in streams: + key = st[0].stats.component + timeList[key] = [] + mplTimes[key] = [] + for tr in st: + for t in np.ma.getdata(tr.times()): + newt = sTime + t + timeList[key].append(newt) + mplTimes[key].append(newt.matplotlib_date) + + #Ensure that the min and max times for each component are the same + for i, k in enumerate(mplTimes.keys()): + currMin = np.min(list(map(np.min, mplTimes[k]))) + currMax = np.max(list(map(np.max, mplTimes[k]))) + + if i == 0: + xmin = currMin + xmax = currMax + else: + if xmin > currMin: + xmin = currMin + if xmax < currMax: + xmax = currMax + + axes['Z'].xaxis_date() + axes['N'].xaxis_date() + axes['E'].xaxis_date() + + #tTicks = mdates.MinuteLocator(interval=5) + #axis.xaxis.set_major_locator(tTicks) + axes['E'].xaxis.set_major_locator(mdates.MinuteLocator(byminute=range(0,60,5))) + axes['E'].xaxis.set_major_formatter(mdates.DateFormatter('%H:%M')) + axes["E"].xaxis.set_minor_locator(mdates.MinuteLocator(interval=1)) + axes["E"].tick_params(axis='x', labelsize=8) + + + streams = [zStream.merge(method=1), + nStream.merge(method=1), + eStream.merge(method=1)] + + for st in streams: + for i, tr in enumerate(st): + key = tr.stats.component + if key == 'Z': + C='k' + elif key=='N': + C='r' + else: + C='b' + axes[key].plot(mplTimes[key], tr.data, color=C, linewidth=0.15) + + + axes['Z'].set_ylabel('Z') + axes['N'].set_ylabel('N') + axes['E'].set_ylabel('E') + + #stDz = np.abs(np.nanstd(stream.select(component='Z')[0].data)) + #stDn = 
np.abs(np.nanstd(stream.select(component='N')[0].data)) + #stDe = np.abs(np.nanstd(stream.select(component='E')[0].data)) + #stD = max([stDz, stDn, stDe]) + + for i, comp in enumerate(list(mplTimes.keys())): + stD = np.abs(np.nanstd(np.ma.getdata(stream.select(component=comp)[0].data))) + dmed = np.nanmedian(np.ma.getdata(stream.select(component=comp)[0].data)) + + axes[comp].set_ylim([dmed-ylim_std*stD, dmed+ylim_std*stD]) + if xmin < 0: + xmin=params['hvsr_band'][0] + axes[comp].set_xlim([xmin, xmax]) + + fig.suptitle(params['site']) + + day = "{}-{}-{}".format(stream[0].stats.starttime.year, stream[0].stats.starttime.month, stream[0].stats.starttime.day) + axes['E'].set_xlabel('UTC Time \n'+ day) + + #plt.rcParams['figure.dpi'] = 100 + #plt.rcParams['figure.figsize'] = (5,4) + + #fig.tight_layout() + fig.canvas.draw() + + if show_plot: + plt.show() + + if return_fig: + return fig + return
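A minimal usage sketch for plot_stream() (synthetic data only; the params argument here is a plain dict standing in for an HVSRData object, since only the 'site' and 'hvsr_band' keys are read by this function):

import numpy as np
import obspy

# Build a synthetic 3-component (Z/N/E) stream, one minute at 100 Hz
traces = []
for comp in ['Z', 'N', 'E']:
    header = {'network': 'AM', 'station': 'TEST', 'channel': 'EH' + comp,
              'sampling_rate': 100, 'starttime': obspy.UTCDateTime(2024, 1, 1)}
    traces.append(obspy.Trace(data=np.random.randn(100 * 60).astype(np.float32), header=header))

stream = obspy.Stream(traces)
params = {'site': 'Example Site', 'hvsr_band': [0.4, 40]}
fig = plot_stream(stream, params, show_plot=False, ylim_std=0.75)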
+ + + +# Main function for processing HVSR Curve +
+[docs] +def process_hvsr(hvsr_data, horizontal_method=None, smooth=True, freq_smooth='konno ohmachi', f_smooth_width=40, resample=True, outlier_curve_rmse_percentile=False, azimuth=None, verbose=False): + """Process the input data and get HVSR data + + This is the main function that uses other (private) functions to do + the bulk of processing of the HVSR data and the data quality checks. + + Parameters + ---------- + hvsr_data : HVSRData or HVSRBatch + Data object containing all the parameters input and generated by the user (usually, during sprit.input_params(), sprit.fetch_data(), sprit.generate_ppsds() and/or sprit.remove_noise()). + horizontal_method : int or str, default=3 + Method to use for combining the horizontal components. Default is 3) Geometric Mean + 0) (not used) + 1) 'Diffuse field assumption' H = √( (eie_E + eie_N) / eie_Z), eie = equal interval energy + 2) 'Arithmetic Mean' H ≡ (HN + HE)/2 + 3) 'Geometric Mean' H ≡ √(HN · HE), recommended by the SESAME project (2004) + 4) 'Vector Summation' H ≡ √(HN^2 + HE^2) + 5) 'Quadratic Mean' H ≡ √(HN^2 + HE^2)/2 + 6) 'Maximum Horizontal Value' H ≡ max {HN, HE} + 7) 'Minimum Horizontal Valey' H ≡ min {HN, HE} + 8) 'Single Azimuth' H = H2·cos(az) + H1·sin(az) + smooth : bool, default=True + bool or int may be used. + If True, default to smooth H/V curve to using savgoy filter with window length of 51 (works well with default resample of 1000 pts) + If int, the length of the window in the savgoy filter. + freq_smooth : str {'konno ohmachi', 'constant', 'proportional'} + Which frequency smoothing method to use. By default, uses the 'konno ohmachi' method. + - The Konno & Ohmachi method uses the obspy.signal.konnoohmachismoothing.konno_ohmachi_smoothing() function: https://docs.obspy.org/packages/autogen/obspy.signal.konnoohmachismoothing.konno_ohmachi_smoothing.html + - The constant method uses a window of constant length f_smooth_width + - The proportional method uses a window the percentage length of the frequncy steps/range (f_smooth_width now refers to percentage) + See here for more information: https://www.geopsy.org/documentation/geopsy/hv-processing.html + f_smooth_width : int, default = 40 + - For 'konno ohmachi': passed directly to the bandwidth parameter of the konno_ohmachi_smoothing() function, determines the width of the smoothing peak, with lower values resulting in broader peak. Must be > 0. + - For 'constant': the size of a triangular smoothing window in the number of frequency steps + - For 'proportional': the size of a triangular smoothing window in percentage of the number of frequency steps (e.g., if 1000 frequency steps/bins and f_smooth_width=40, window would be 400 steps wide) + resample : bool, default = True + bool or int. + If True, default to resample H/V data to include 1000 frequency values for the rest of the analysis + If int, the number of data points to interpolate/resample/smooth the component psd/HV curve data to. + outlier_curve_rmse_percentile : bool, float, default = False + If False, outlier curve removal is not carried out here. + If True, defaults to 98 (98th percentile). + Otherwise, float of percentile used as rmse_thresh of remove_outlier_curve(). + azimuth : float, default = None + The azimuth angle to use when method is single azimuth. 
+ verbose : bool, defualt=False + Whether to print output to terminal + + Returns + ------- + hvsr_out : dict + Dictionary containing all the information about the data, including input parameters + + """ + orig_args = locals().copy() #Get the initial arguments + start_time = datetime.datetime.now() + + # Update with processing parameters specified previously in input_params, if applicable + if 'processing_parameters' in hvsr_data.keys(): + if 'process_hvsr' in hvsr_data['processing_parameters'].keys(): + update_msg = [] + for k, v in hvsr_data['processing_parameters']['process_hvsr'].items(): + defaultVDict = dict(zip(inspect.getfullargspec(process_hvsr).args[1:], + inspect.getfullargspec(process_hvsr).defaults)) + # Manual input to function overrides the imported parameter values + if (not isinstance(v, (HVSRData, HVSRBatch))) and (k in orig_args.keys()) and (orig_args[k]==defaultVDict[k]): + update_msg.append(f'\t\t{k} = {v} (previously {orig_args[k]})') + orig_args[k] = v + + horizontal_method = orig_args['horizontal_method'] + smooth = orig_args['smooth'] + freq_smooth = orig_args['freq_smooth'] + f_smooth_width = orig_args['f_smooth_width'] + resample = orig_args['resample'] + outlier_curve_rmse_percentile = orig_args['outlier_curve_rmse_percentile'] + verbose = orig_args['verbose'] + + if (verbose and isinstance(hvsr_data, HVSRBatch)) or (verbose and not hvsr_data['batch']): + if isinstance(hvsr_data, HVSRData) and hvsr_data['batch']: + pass + else: + print('\nCalculating Horizontal/Vertical Ratios at all frequencies/time steps (process_hvsr())') + print('\tUsing the following parameters:') + for key, value in orig_args.items(): + if key=='hvsr_data': + pass + else: + print('\t {}={}'.format(key, value)) + print() + + if 'processing_parameters' in hvsr_data.keys() and 'process_hvsr' in hvsr_data['processing_parameters'].keys(): + if update_msg != []: + update_msg.insert(0, '\tThe following parameters were updated using the processing_parameters attribute:') + for msg_line in update_msg: + print(msg_line) + print() + + # PROCESSING STARTS HERE (SEPARATE LOOP FOR BATCH) + #Site is in the keys anytime it's not batch + if isinstance(hvsr_data, HVSRBatch): + #If running batch, we'll loop through each site + hvsr_out = {} + for site_name in hvsr_data.keys(): + args = orig_args.copy() #Make a copy so we don't accidentally overwrite + args['hvsr_data'] = hvsr_data[site_name] #Get what would normally be the "hvsr_data" variable for each site + if hvsr_data[site_name]['ProcessingStatus']['OverallStatus']: + try: + hvsr_out[site_name] = __process_hvsr_batch(**args) #Call another function, that lets us run this function again + except: + hvsr_out = hvsr_data + hvsr_out[site_name]['ProcessingStatus']['HVStatus']=False + hvsr_out[site_name]['ProcessingStatus']['OverallStatus'] = False + else: + hvsr_out = hvsr_data + hvsr_out[site_name]['ProcessingStatus']['HVStatus']=False + hvsr_out[site_name]['ProcessingStatus']['OverallStatus'] = False + hvsr_out = HVSRBatch(hvsr_out) + hvsr_out = _check_processing_status(hvsr_out, start_time=start_time, func_name=inspect.stack()[0][3], verbose=verbose) + return hvsr_out + + ppsds = hvsr_data['ppsds'].copy()#[k]['psd_values'] + ppsds = sprit_utils.check_xvalues(ppsds) + + methodList = ['<placeholder_0>', # 0 + 'Diffuse Field Assumption', # 1 + 'Arithmetic Mean', # 2 + 'Geometric Mean', # 3 + 'Vector Summation', # 4 + 'Quadratic Mean', # 5 + 'Maximum Horizontal Value', # 6 + 'Minimum Horizontal Value', # 7 + 'Single Azimuth' ] # 8 + x_freqs = {} + x_periods = 
{} + + psdValsTAvg = {} + stDev = {} + stDevValsP = {} + stDevValsM = {} + psdRaw={} + currTimesUsed={} + hvsr_data['hvsr_windows_df']['Use'] = hvsr_data['hvsr_windows_df']['Use'].astype(bool) + hvsrDF = hvsr_data['hvsr_windows_df'] + def move_avg(y, box_pts): + #Moving average using a Hanning window of length box_pts + box = np.hanning(box_pts) + y_smooth = np.convolve(y, box, mode='same') / sum(box) + return y_smooth + + for k in ppsds.keys(): + input_ppsds = np.stack(hvsrDF['psd_values_'+k].values) + + xValMin_per = np.round(1/hvsr_data['hvsr_band'][1], 4) + xValMax_per = np.round(1/hvsr_data['hvsr_band'][0], 4) + + #If resampling has been selected + if resample is True or type(resample) is int or type(resample) is float: + if resample is True: + resample = 1000 #Default number of resampled frequency values + + #Resample period bin values + x_periods[k] = np.logspace(np.log10(xValMin_per), np.log10(xValMax_per), num=resample) + if smooth: + if smooth is True: + smooth = 51 #Default smoothing window + padVal = 25 + else: + if smooth % 2 == 0: + smooth += 1 #Window length needs to be odd + padVal = smooth//2 + if padVal % 2 == 0: + padVal += 1 + + #Resample raw ppsd values + for i, ppsd_t in enumerate(input_ppsds): + if i==0: + psdRaw[k] = np.interp(x_periods[k], ppsds[k]['period_bin_centers'], ppsd_t) + if smooth is not False: + padRawKPad = np.pad(psdRaw[k], [padVal, padVal], mode='reflect') + padRawKPadSmooth = move_avg(padRawKPad, smooth) + psdRaw[k] = padRawKPadSmooth[padVal:-padVal] + + else: + psdRaw[k] = np.vstack((psdRaw[k], np.interp(x_periods[k], ppsds[k]['period_bin_centers'], ppsd_t))) + if smooth is not False: + padRawKiPad = np.pad(psdRaw[k][i], [padVal, padVal], mode='reflect') + padRawKiPadSmooth = move_avg(padRawKiPad, smooth) + psdRaw[k][i] = padRawKiPadSmooth[padVal:-padVal] + + else: + #If no resampling desired + x_periods[k] = np.array(ppsds[k]['period_bin_centers']) + + # Clean up edge freq.
values + x_periods[k][0] = 1/hvsr_data['hvsr_band'][1] + x_periods[k][-1] = 1/hvsr_data['hvsr_band'][0] + psdRaw[k] = np.array(input_ppsds) + + hvsrDF['psd_values_'+k] = list(psdRaw[k]) + use = hvsrDF['Use'].astype(bool) + + #Get average psd value across time for each channel (used to calc main H/V curve) + psdValsTAvg[k] = np.nanmedian(np.stack(hvsrDF[use]['psd_values_'+k]), axis=0) + x_freqs[k] = np.array([1/p for p in x_periods[k]]) #np.divide(np.ones_like(x_periods[k]), x_periods[k]) + stDev[k] = np.nanstd(np.stack(hvsrDF[use]['psd_values_'+k]), axis=0) + + stDevValsM[k] = np.array(psdValsTAvg[k] - stDev[k]) + stDevValsP[k] = np.array(psdValsTAvg[k] + stDev[k]) + + currTimesUsed[k] = np.stack(hvsrDF[use]['TimesProcessed_Obspy']) + #currTimesUsed[k] = ppsds[k]['current_times_used'] #original one + + # Get string of horizontal_method type + # First, define default + if horizontal_method is None: + horizontal_method = 3 # Geometric mean is used as default if nothing is specified + + # If an azimuth has been calculated and it's only one, automatically use the single azimuth method + if len(hvsr_data.stream.merge().select(component='R')) == 1: + horizontal_method = 8 # Single azimuth + + # horizontal_method needs to be str or int + # First check if input is a string + if type(horizontal_method) is str: + if horizontal_method.isdigit(): + horizontal_method = int(horizontal_method) + else: + horizontal_method = methodList.index(horizontal_method.title()) + + # Now, horizontal_method is int no matter how it was entered + methodInt = horizontal_method + horizontal_method = methodList[horizontal_method] + + hvsr_data['horizontal_method'] = horizontal_method + + #This gets the main hvsr curve averaged from all time steps + anyK = list(x_freqs.keys())[0] + hvsr_curve, hvsr_az, hvsr_tSteps = __get_hvsr_curve(x=x_freqs[anyK], psd=psdValsTAvg, horizontal_method=methodInt, hvsr_data=hvsr_data, azimuth=azimuth, verbose=verbose) + origPPSD = hvsr_data['ppsds_obspy'].copy() + + #print('hvcurv', np.array(hvsr_curve).shape) + #print('hvaz', np.array(hvsr_az).shape) + + #Add some other variables to our output dictionary + hvsr_dataUpdate = {'input_params':hvsr_data, + 'x_freqs':x_freqs, + 'hvsr_curve':hvsr_curve, + 'hvsr_az':hvsr_az, + 'x_period':x_periods, + 'psd_raw':psdRaw, + 'current_times_used': currTimesUsed, + 'psd_values_tavg':psdValsTAvg, + 'ppsd_std':stDev, + 'ppsd_std_vals_m':stDevValsM, + 'ppsd_std_vals_p':stDevValsP, + 'horizontal_method':horizontal_method, + 'ppsds':ppsds, + 'ppsds_obspy':origPPSD, + 'tsteps_used': hvsr_data['tsteps_used'].copy(), + 'hvsr_windows_df':hvsr_data['hvsr_windows_df'] + } + + hvsr_out = HVSRData(hvsr_dataUpdate) + + #This is if manual editing was used (should probably be updated at some point to just use masks) + if 'x_windows_out' in hvsr_data.keys(): + hvsr_out['x_windows_out'] = hvsr_data['x_windows_out'] + else: + hvsr_out['x_windows_out'] = [] + + freq_smooth_ko = ['konno ohmachi', 'konno-ohmachi', 'konnoohmachi', 'konnohmachi', 'ko', 'k'] + freq_smooth_constant = ['constant', 'const', 'c'] + freq_smooth_proport = ['proportional', 'proportion', 'prop', 'p'] + + #Frequency Smoothing + if not freq_smooth: + if verbose: + warnings.warn('No frequency smoothing is being applied. 
This is not recommended for noisy datasets.') + elif freq_smooth is True or (freq_smooth.lower() in freq_smooth_ko and (not not f_smooth_width and not not freq_smooth)): + from obspy.signal import konnoohmachismoothing + for k in hvsr_out['psd_raw']: + colName = f'psd_values_{k}' + + ppsd_data = np.stack(hvsr_out['hvsr_windows_df'][colName]) + ppsd_data = hvsr_out['psd_raw'][k] + + + freqs = hvsr_out['x_freqs'][k] + padding_length = int(f_smooth_width) + + padding_value_R = np.nanmean(ppsd_data[:,-1*padding_length:]) + padding_value_L = np.nanmean(ppsd_data[:,:padding_length]) + + # Pad the data to prevent boundary anamolies + padded_ppsd_data = np.pad(ppsd_data, ((0, 0), (padding_length, padding_length)), + 'constant', constant_values=(padding_value_L, padding_value_R)) + + # Pad the frequencies + ratio = freqs[1] / freqs[0] + # Generate new elements on either side and combine + left_padding = [freqs[0] / (ratio ** i) for i in range(padding_length, 0, -1)] + right_padding = [freqs[-1] * (ratio ** i) for i in range(1, padding_length + 1)] + padded_freqs = np.concatenate([left_padding, freqs, right_padding]) + + #Filter out UserWarning for just this method, since it throws up a UserWarning that doesn't really matter about dtypes often + with warnings.catch_warnings(): + #warnings.simplefilter('ignore', category=UserWarning) + padded_ppsd_data = padded_ppsd_data.astype(padded_freqs.dtype) # Make them the same datatype + padded_ppsd_data = np.round(padded_ppsd_data, 12) # Prevent overflows + padded_freqs = np.round(padded_freqs, 9) + + smoothed_ppsd_data = konnoohmachismoothing.konno_ohmachi_smoothing(padded_ppsd_data, padded_freqs, + bandwidth=f_smooth_width, normalize=True) + + # Only use the original, non-padded data + smoothed_ppsd_data = smoothed_ppsd_data[:,padding_length:-1*padding_length] + hvsr_out['psd_raw'][k] = smoothed_ppsd_data + hvsr_out['hvsr_windows_df'][colName] = pd.Series(list(smoothed_ppsd_data), index=hvsr_out['hvsr_windows_df'].index) + elif freq_smooth.lower() in freq_smooth_constant: + hvsr_out = __freq_smooth_window(hvsr_out, f_smooth_width, kind_freq_smooth='constant') + elif freq_smooth.lower() in freq_smooth_proport: + hvsr_out = __freq_smooth_window(hvsr_out, f_smooth_width, kind_freq_smooth='proportional') + else: + if verbose: + warnings.warn(f'You indicated no frequency smoothing should be applied (freq_smooth = {freq_smooth}). 
This is not recommended for noisy datasets.') + + #Get hvsr curve from three components at each time step + anyK = list(hvsr_out['psd_raw'].keys())[0] + if horizontal_method==1 or horizontal_method =='dfa' or horizontal_method =='Diffuse Field Assumption': + hvsr_tSteps_az = {} + else: + hvsr_tSteps = [] + hvsr_tSteps_az = {} + for tStep in range(len(hvsr_out['psd_raw'][anyK])): + tStepDict = {} + for k in hvsr_out['psd_raw']: + tStepDict[k] = hvsr_out['psd_raw'][k][tStep] + + hvsr_tstep, hvsr_az_tstep, _ = __get_hvsr_curve(x=hvsr_out['x_freqs'][anyK], psd=tStepDict, horizontal_method=methodInt, hvsr_data=hvsr_out, verbose=verbose) + + hvsr_tSteps.append(np.float32(hvsr_tstep)) #Add hvsr curve for each time step to larger list of arrays with hvsr_curves + for k, v in hvsr_az_tstep.items(): + if tStep == 0: + hvsr_tSteps_az[k] = [np.float32(v)] + else: + hvsr_tSteps_az[k].append(np.float32(v)) + hvsr_out['hvsr_windows_df']['HV_Curves'] = hvsr_tSteps + + # Add azimuth HV Curves to hvsr_windows_df, if applicable + for key, values in hvsr_tSteps_az.items(): + hvsr_out['hvsr_windows_df']['HV_Curves_'+key] = values + + hvsr_out['ind_hvsr_curves'] = {} + for col_name in hvsr_out['hvsr_windows_df']: + if "HV_Curves" in col_name: + if col_name == 'HV_Curves': + colID = 'HV' + else: + colID = col_name.split('_')[2] + hvsr_out['ind_hvsr_curves'][colID] = np.stack(hvsr_out['hvsr_windows_df'][hvsr_out['hvsr_windows_df']['Use']][col_name]) + + #Initialize array based only on the curves we are currently using + indHVCurvesArr = np.stack(hvsr_out['hvsr_windows_df']['HV_Curves'][hvsr_out['hvsr_windows_df']['Use']]) + + if outlier_curve_rmse_percentile: + if outlier_curve_rmse_percentile is True: + outlier_curve_rmse_percentile = 98 + hvsr_out = remove_outlier_curves(hvsr_out, use_percentile=True, rmse_thresh=outlier_curve_rmse_percentile, use_hv_curve=True, verbose=verbose) + + hvsr_out['ind_hvsr_stdDev'] = {} + for col_name in hvsr_out['hvsr_windows_df'].columns: + if "HV_Curves" in col_name: + if col_name == 'HV_Curves': + keyID = 'HV' + else: + keyID = col_name.split('_')[2] + curr_indHVCurvesArr = np.stack(hvsr_out['hvsr_windows_df'][col_name][hvsr_out['hvsr_windows_df']['Use']]) + hvsr_out['ind_hvsr_stdDev'][keyID] = np.nanstd(curr_indHVCurvesArr, axis=0) + + #Get peaks for each time step + hvsr_out['ind_hvsr_peak_indices'] = {} + tStepPFDict = {} + #hvsr_out['hvsr_windows_df']['CurvesPeakFreqs'] = {} + for col_name in hvsr_out['hvsr_windows_df'].columns: + if col_name.startswith("HV_Curves"): + tStepPeaks = [] + if len(col_name.split('_')) > 2: + colSuffix = "_"+'_'.join(col_name.split('_')[2:]) + else: + colSuffix = '_HV' + + for tStepHVSR in hvsr_out['hvsr_windows_df'][col_name]: + tStepPeaks.append(__find_peaks(tStepHVSR)) + hvsr_out['ind_hvsr_peak_indices']['CurvesPeakIndices'+colSuffix] = tStepPeaks + + tStepPFList = [] + for tPeaks in tStepPeaks: + tStepPFs = [] + for pInd in tPeaks: + tStepPFs.append(np.float32(hvsr_out['x_freqs'][anyK][pInd])) + tStepPFList.append(tStepPFs) + tStepPFDict['CurvesPeakFreqs'+colSuffix] = tStepPFList + + indHVPeakIndsDF = pd.DataFrame(hvsr_out['ind_hvsr_peak_indices'], index=hvsr_out['hvsr_windows_df'].index) + tStepPFDictDF = pd.DataFrame(tStepPFDict, index=hvsr_out['hvsr_windows_df'].index) + hvsr_out['hvsr_windows_df'] = pd.concat([hvsr_out['hvsr_windows_df'], indHVPeakIndsDF, tStepPFDictDF], axis=1) + + #Get peaks of main HV curve + hvsr_out['hvsr_peak_indices'] = {} + hvsr_out['hvsr_peak_indices']['HV'] = __find_peaks(hvsr_out['hvsr_curve']) + for k in 
hvsr_az.keys(): + hvsr_out['hvsr_peak_indices'][k] = __find_peaks(hvsr_out['hvsr_az'][k]) + + #Get frequency values at HV peaks in main curve + hvsr_out['hvsr_peak_freqs'] = {} + for k in hvsr_out['hvsr_peak_indices'].keys(): + hvsrPF = [] + for p in hvsr_out['hvsr_peak_indices'][k]: + hvsrPF.append(hvsr_out['x_freqs'][anyK][p]) + hvsr_out['hvsr_peak_freqs'][k] = np.array(hvsrPF) + + #Get other HVSR parameters (i.e., standard deviations, etc.) + hvsr_out = __gethvsrparams(hvsr_out) + + #Include the original obspy stream in the output + hvsr_out['input_stream'] = hvsr_dataUpdate['input_params']['input_stream'] #input_stream + hvsr_out = sprit_utils.make_it_classy(hvsr_out) + hvsr_out['ProcessingStatus']['HVStatus'] = True + + if 'processing_parameters' not in hvsr_out.keys(): + hvsr_out['processing_parameters'] = {} + hvsr_out['processing_parameters']['generate_ppsds'] = {} + for key, value in orig_args.items(): + hvsr_out['processing_parameters']['generate_ppsds'][key] = value + + if str(horizontal_method) == '8' or horizontal_method.lower() == 'single azimuth': + if azimuth is None: + azimuth = 90 + hvsr_out['single_azimuth'] = azimuth + + hvsr_out = _check_processing_status(hvsr_out, start_time=start_time, func_name=inspect.stack()[0][3], verbose=verbose) + + return hvsr_out
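The horizontal-combination options listed in the docstring above reduce to a few lines of numpy. A sketch with made-up amplitude arrays, where HN, HE, and V stand for the north, east, and vertical spectral amplitudes at each frequency (illustrative only, not the package's internal implementation):

import numpy as np

HN, HE, V = np.array([2.0, 3.0]), np.array([4.0, 1.0]), np.array([1.0, 1.0])

H = {'Arithmetic Mean': (HN + HE) / 2,
     'Geometric Mean': np.sqrt(HN * HE),  # SESAME (2004) recommendation; the default
     'Vector Summation': np.sqrt(HN**2 + HE**2),
     'Quadratic Mean': np.sqrt((HN**2 + HE**2) / 2),
     'Maximum Horizontal Value': np.maximum(HN, HE),
     'Minimum Horizontal Value': np.minimum(HN, HE)}

hvsr_curves = {name: h / V for name, h in H.items()}  # H/V ratio per method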
+ + + +# Read data from Tromino +
+[docs] +def read_tromino_files(input_data, params, struct_format='H', sampling_rate=128, start_byte=24576, verbose=False, **kwargs): + """Function to read data from a Tromino seismograph. Specifically, this has been lightly tested on Tromino 3G+ machines. + + Parameters + ---------- + input_data : str or pathlib.Path + The input parameter _datapath_ from sprit.input_params() + params : HVSRData or HVSRBatch + The parameters as read in from input_params() and fetch_data() + struct_format : str, default='H' + Format character used by struct.unpack() to interpret each sample (default 'H' is an unsigned 16-bit integer) + sampling_rate : int, default=128 + Sampling rate of the instrument, in samples per second + start_byte : int, default=24576 + Index in the unpacked data array at which the interleaved component data begin + verbose : bool, optional + Whether to print results to terminal, by default False + + Returns + ------- + obspy.Stream + An obspy.Stream object containing the trace data from the Tromino instrument + """ + dPath = input_data + + strucSizes = {'c':1, 'b':1,'B':1, '?':1, + 'h':2,'H':2,'e':2, + 'i':4,'I':4,'l':4,'L':4,'f':4, + 'q':8,'Q':8,'d':8, + 'n':8,'N':8,'s':16,'p':16,'P':16,'x':16} + + #'H' seems correct; 'I', 'L', or 'Q' also appear to work in testing, though 'Q' is doubtful + structFormat = struct_format + structSize = strucSizes[structFormat] + + dataList = [] + with open(dPath, 'rb') as f: + while True: + data = f.read(structSize) # Read structSize bytes (2 bytes for the default 'H') + if not data: # End of file + break + value = struct.unpack(structFormat, data)[0] # Unpack bytes using struct_format + dataList.append(value) + + dataArr = np.array(dataList) + + medVal = np.nanmedian(dataArr[50000:100000]) #Median value, used to remove the DC offset + + if 'start_byte' in kwargs.keys(): + start_byte = kwargs['start_byte'] + + startByte = start_byte + comp1 = dataArr[startByte::3] - medVal #Components are interleaved sample by sample + comp2 = dataArr[startByte+1::3] - medVal + comp3 = dataArr[startByte+2::3] - medVal + headerBytes = dataArr[:startByte] + + if 'sampling_rate' in kwargs.keys(): + sampling_rate = kwargs['sampling_rate'] + + sTime = obspy.UTCDateTime(params['acq_date'].year, params['acq_date'].month, params['acq_date'].day, + params['starttime'].hour, params['starttime'].minute, + params['starttime'].second,params['starttime'].microsecond) + eTime = sTime + len(comp1)/sampling_rate + + loc = '' + if type(params['station']) is int or params['station'].isdigit(): + loc = str(params['station']) + + traceHeader1 = {'sampling_rate':sampling_rate, + 'calib' : 1, + 'npts':len(comp1), + 'network':'AM', + 'location': loc, + 'station' : 'TRMNO', + 'channel':'EHE', + 'starttime':sTime} + + traceHeader2=traceHeader1.copy() + traceHeader3=traceHeader1.copy() + traceHeader2['channel'] = 'EHN' + traceHeader3['channel'] = 'EHZ' + + trace1 = obspy.Trace(data=comp1, header=traceHeader1) + trace2 = obspy.Trace(data=comp2, header=traceHeader2) + trace3 = obspy.Trace(data=comp3, header=traceHeader3) + + st = obspy.Stream([trace1, trace2, trace3]) + return st
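The sample-by-sample struct.unpack() loop above can also be expressed with numpy.fromfile, which is equivalent for the default 'H' format but much faster. A sketch (read_interleaved is a hypothetical helper; the '<u2' dtype assumes little-endian unsigned 16-bit samples, an assumption to verify against your instrument):

import numpy as np

def read_interleaved(path, start_byte=24576, dtype='<u2'):
    # Read every sample in the file as one array, then de-interleave the 3 components
    dataArr = np.fromfile(path, dtype=dtype).astype(np.float64)
    medVal = np.nanmedian(dataArr[50000:100000])  # same rough DC-offset estimate as above
    # Note: start_byte is used as an index into the sample array, as in read_tromino_files()
    return [dataArr[start_byte + i::3] - medVal for i in range(3)]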
+ + + +# Function to remove noise windows from data +
+[docs] +def remove_noise(hvsr_data, remove_method=None, processing_window=None, sat_percent=0.995, noise_percent=0.80, sta=2, lta=30, stalta_thresh=[8, 16], warmup_time=0, cooldown_time=0, min_win_size=1, remove_raw_noise=False, show_stalta_plot=False, verbose=False): + """Function to remove noisy windows from data, using various methods. + + Methods include + - Manual window selection (by clicking on a chart with spectrogram and stream data), + - Auto window selection, which runs the following methods in sequence (these can also be run independently): + - A sta/lta "antitrigger" method (using stalta values to automatically remove triggered windows where there appears to be too much noise) + - A noise threshold method, which cuts off all times where the noise amplitude exceeds (by default) 80% of the highest amplitude noise sample for the length specified by lta (in seconds) + - A saturation threshold method, which cuts off all times where the noise amplitude exceeds (by default) 99.5% of the highest amplitude noise sample. + + Parameters + ---------- + hvsr_data : dict, obspy.Stream, or obspy.Trace + Dictionary containing all the data and parameters for the HVSR analysis + remove_method : str, {'auto', 'manual', 'stalta'/'antitrigger', 'saturation threshold', 'noise threshold', 'warmup'/'cooldown'/'buffer'/'warm_cool'} + The different methods for removing noise from the dataset. A list of strings will also work, in which case it should be a list of the above strings. See descriptions above for how each method works. By default 'auto.' + If remove_method='auto', this is the equivalent of remove_method=['noise threshold', 'antitrigger', 'saturation threshold', 'warm_cool'] (see the usage sketch following this function) + processing_window : list, tuple, or None + A list/tuple of two items [s, e] or a list/tuple of two-item lists/tuples [[s0, e0], [s1,e1],...[sn, en]] with start and end time(s) for windows to *keep* for processing. + Data outside of these times will be excluded from processing. + Times should be obspy.UTCDateTime objects to ensure precision, but time strings ("13:05") will also work in most cases (exceptions may be when the data stream starts/ends on different UTC days) + sat_percent : float, default=0.995 + Percentage (between 0 and 1) to use as the threshold at which to remove data. This is used in the saturation method. By default 0.995. + If a value is passed that is greater than 1, it will be divided by 100 to obtain the percentage. + noise_percent : float, default = 0.8 + Percentage (between 0 and 1) to use as the threshold at which to remove data, if it persists for longer than the time specified by min_win_size (in seconds). This is used in the noise threshold method. By default 0.8. + If a value is passed that is greater than 1, it will be divided by 100 to obtain the percentage. + sta : int, optional + Short term average (STA) window (in seconds), by default 2. For use with sta/lta antitrigger method. + lta : int, optional + Long term average (LTA) window (in seconds), by default 30. For use with sta/lta antitrigger method. + stalta_thresh : list, default=[8, 16] + Two-item list or tuple with the thresholds for the stalta antitrigger. The first value (index [0]) is the lower threshold; the second value (index [1]) is the upper threshold. By default [8, 16]. + warmup_time : int, default=0 + Time in seconds to allow for warmup of the instrument (or while operator is still near instrument). This will remove any data before this time, by default 0.
+ cooldown_time : int, default=0 + Time in seconds to allow for cooldown of the instrument (or for when operator is nearing instrument). This will remove any data within this many seconds of the end of the record, by default 0. + min_win_size : float, default=1 + The minimum length (in seconds) a window must remain over the specified threshold for it to be removed + remove_raw_noise : bool, default=False + If remove_raw_noise=True, will perform operation on raw data ('input_stream'), rather than potentially already-modified data ('stream'). + show_stalta_plot : bool, default=False + Whether to show a plot of the sta/lta characteristic function, by default False + verbose : bool, default=False + Whether to print status of remove_noise + + Returns + ------- + output : dict + Dictionary similar to hvsr_data, but containing modified data with 'noise' removed + """ + #Get input parameters + orig_args = locals().copy() + start_time = datetime.datetime.now() + + # Update with processing parameters specified previously in input_params, if applicable + if 'processing_parameters' in hvsr_data.keys(): + if 'remove_noise' in hvsr_data['processing_parameters'].keys(): + update_msg = [] + for k, v in hvsr_data['processing_parameters']['remove_noise'].items(): + defaultVDict = dict(zip(inspect.getfullargspec(remove_noise).args[1:], + inspect.getfullargspec(remove_noise).defaults)) + # Manual input to function overrides the imported parameter values + if (not isinstance(v, (HVSRData, HVSRBatch))) and (k in orig_args.keys()) and (orig_args[k]==defaultVDict[k]): + update_msg.append(f'\t\t{k} = {v} (previously {orig_args[k]})') + orig_args[k] = v + + remove_method = orig_args['remove_method'] + processing_window = orig_args['processing_window'] + sat_percent = orig_args['sat_percent'] + noise_percent = orig_args['noise_percent'] + sta = orig_args['sta'] + lta = orig_args['lta'] + stalta_thresh = orig_args['stalta_thresh'] + warmup_time = orig_args['warmup_time'] + cooldown_time = orig_args['cooldown_time'] + min_win_size = orig_args['min_win_size'] + remove_raw_noise = orig_args['remove_raw_noise'] + verbose = orig_args['verbose'] + + if (verbose and isinstance(hvsr_data, HVSRBatch)) or (verbose and not hvsr_data['batch']): + if isinstance(hvsr_data, HVSRData) and hvsr_data['batch']: + pass + else: + print('\nRemoving noisy data windows (remove_noise())') + print('\tUsing the following parameters:') + for key, value in orig_args.items(): + if key=='hvsr_data': + pass + else: + print('\t {}={}'.format(key, value)) + print() + + if 'processing_parameters' in hvsr_data.keys() and 'remove_noise' in hvsr_data['processing_parameters'].keys(): + if update_msg != []: + update_msg.insert(0, '\tThe following parameters were updated using the processing_parameters attribute:') + for msg_line in update_msg: + print(msg_line) + print() + + # Set up lists + manualList = ['manual', 'man', 'm', 'window', 'windows', 'w'] + autoList = ['auto', 'automatic', 'all', 'a'] + antitrigger = ['stalta', 'anti', 'antitrigger', 'trigger', 'at'] + saturationThresh = ['saturation threshold', 'sat_thresh', 'sat thresh', 'saturation', 'sat', 's'] + noiseThresh = ['noise threshold', 'noise thresh', 'noise_thresh', 'noise', 'threshold', 'n'] + warmup_cooldown=['warmup', 'cooldown', 'warm', 'cool', 'buffer', 'warmup-cooldown', 'warmup_cooldown', 'wc', 'warm_cool', 'warm-cool'] + procWinList = ['processing_window', 'processing window', 'windows', 'window', 'win', 'pw'] + + # Do batch runs + if isinstance(hvsr_data, HVSRBatch): + #If running batch, we'll loop through each site + hvsr_out = {} + for site_name in hvsr_data.keys(): + args = orig_args.copy() #Make a copy so we don't accidentally overwrite +
args['hvsr_data'] = hvsr_data[site_name] #Get what would normally be the "hvsr_data" variable for each site + if hvsr_data[site_name]['ProcessingStatus']['OverallStatus']: + try: + hvsr_out[site_name] = __remove_noise_batch(**args) #Call another function, that lets us run this function again + except Exception as e: + hvsr_out[site_name]['ProcessingStatus']['RemoveNoiseStatus']=False + hvsr_out[site_name]['ProcessingStatus']['OverallStatus']=False + if verbose: + print(e) + else: + hvsr_data[site_name]['ProcessingStatus']['RemoveNoiseStatus']=False + hvsr_data[site_name]['ProcessingStatus']['OverallStatus']=False + hvsr_out = hvsr_data + + output = HVSRBatch(hvsr_out) + return output + + if not isinstance(hvsr_data, (HVSRData, dict, obspy.Stream, obspy.Trace)): + warnings.warn(f"Input of type type(hvsr_data)={type(hvsr_data)} cannot be used.") + return hvsr_data + + # Which stream to use (input, or current) + if isinstance(hvsr_data, (HVSRData, dict)): + if remove_raw_noise: + inStream = hvsr_data['input_stream'].copy() + else: + inStream = hvsr_data['stream'].copy() + output = hvsr_data#.copy() + else: + inStream = hvsr_data.copy() + output = inStream.copy() + + outStream = inStream + + # Get remove_method into consistent format (list) + if isinstance(remove_method, str): + if ',' in remove_method: + remove_method = remove_method.split(',') + else: + remove_method = [remove_method] + elif isinstance(remove_method, (list, tuple)): + pass + elif not remove_method: + remove_method=[None] + else: + warnings.warn(f"Input value remove_method={remove_method} must be either string, list of strings, None, or False. No noise removal will be carried out. Please choose one of the following: 'manual', 'auto', 'antitrigger', 'noise threshold', 'warmup_cooldown'.") + return output + orig_removeMeth = remove_method + # Check if any parameter values are different from default (if they are, automatically add that method to remove_method) + rn_signature = inspect.signature(remove_noise) + + methodDict = {'antitrigger':['sta', 'lta', 'stalta_thresh', 'show_stalta_plot'], + 'sat_thresh':['sat_percent'], + 'noise_thresh':['noise_percent', 'min_win_size'], + 'warmup_cooldown':['warmup_time', 'cooldown_time'], + 'processing_window':['processing_window']} + + defaultValDict = {param.name: param.default for param in rn_signature.parameters.values() if param.default is not inspect.Parameter.empty} + + # If a non-default parameter is specified, add the method it corresponds to to remove_method + for key, def_val in defaultValDict.items(): + if key in orig_args: + if def_val != orig_args[key]: + for methodKey, methParamList in methodDict.items(): + if key in methParamList: + # Add the corresponding method to remove_mehtod if not already + if (methodKey not in remove_method) and ('auto' not in remove_method): + if remove_method == [None]: + remove_method = [methodKey] + else: + remove_method.append(methodKey) + + # Reorder list so manual is always first, if it is specified + do_manual = False + if len(set(remove_method).intersection(manualList)) > 0: + do_manual = True + manInd = list(set(remove_method).intersection(manualList))[0] + remove_method.remove(manInd) + remove_method.insert(0, manInd) + + # Reorder list so auto is always first (if no manual) or second (if manual) + # B/c if 'auto' is carried out, no other methods need to be carried out (repetitive) + newAutoInd = 0 + if do_manual: + newAutoInd = 1 + if len(set(remove_method).intersection(autoList)) > 0: + autoInd = 
list(set(remove_method).intersection(autoList))[0] + remove_method.remove(autoInd) + remove_method.insert(newAutoInd, autoInd) + + #Go through each type of removal and remove + if orig_removeMeth != remove_method: + if verbose: + print(f'\tThe remove_method parameter has been updated because non-default parameter values were detected.') + print(f'\tThe remove_method parameter was entered as {orig_removeMeth}, but has been updated to {remove_method}') + + # REMOVE DATA FROM ANALYSIS + for rem_kind in remove_method: + try: + if not rem_kind: + break + elif rem_kind.lower() in manualList: + if isinstance(output, (HVSRData, dict)): + if 'x_windows_out' in output.keys(): + pass + else: + output = _select_windows(output) + window_list = output['x_windows_out'] + if isinstance(outStream, obspy.core.stream.Stream): + if window_list is not None: + output['stream'] = __remove_windows(inStream, window_list, warmup_time) + else: + output = _select_windows(output) + elif isinstance(output, (HVSRData, dict)): + pass + else: + RuntimeError("Only obspy.core.stream.Stream data type is currently supported for manual noise removal method.") + elif rem_kind.lower() in autoList: + outStream = __remove_anti_stalta(outStream, sta=sta, lta=lta, thresh=stalta_thresh, show_stalta_plot=show_stalta_plot, verbose=verbose) + outStream = __remove_noise_thresh(outStream, noise_percent=noise_percent, lta=lta, min_win_size=min_win_size, verbose=verbose) + outStream = __remove_noise_saturate(outStream, sat_percent=sat_percent, min_win_size=min_win_size, verbose=verbose) + outStream = __remove_warmup_cooldown(stream=outStream, warmup_time=warmup_time, cooldown_time=cooldown_time, verbose=verbose) + # Break for-loop, since all the rest are already done as part of auto + break + elif rem_kind.lower() in antitrigger: + outStream = __remove_anti_stalta(outStream, sta=sta, lta=lta, thresh=stalta_thresh, show_stalta_plot=show_stalta_plot, verbose=verbose) + elif rem_kind.lower() in saturationThresh: + outStream = __remove_noise_saturate(outStream, sat_percent=sat_percent, min_win_size=min_win_size, verbose=verbose) + elif rem_kind.lower() in noiseThresh: + outStream = __remove_noise_thresh(outStream, noise_percent=noise_percent, lta=lta, min_win_size=min_win_size, verbose=verbose) + elif rem_kind.lower() in warmup_cooldown: + outStream = __remove_warmup_cooldown(stream=outStream, warmup_time=warmup_time, cooldown_time=cooldown_time, verbose=verbose) + elif rem_kind.lower() in procWinList: + outStream = _keep_processing_windows(stream=outStream, processing_window=processing_window, verbose=verbose) + else: + if len(remove_method)==1: + warnings.warn(f"Input value remove_method={remove_method} is not recognized. No noise removal will be carried out. Please choose one of the following: 'manual', 'auto', 'antitrigger', 'noise threshold', 'warmup_cooldown'.") + break + warnings.warn(f"Input value remove_method={remove_method} is not recognized. Continuing with other noise removal methods.") + except Exception as e: + print(f'\t *Error with {rem_kind} method. 
Data was not removed using that method.') + print(f'\t *{e}') + + + + # Add output + if isinstance(output, (HVSRData, dict)): + if isinstance(outStream, (obspy.Stream, obspy.Trace)): + output['stream_edited'] = outStream + else: + output['stream_edited'] = outStream['stream'] + output['input_stream'] = hvsr_data['input_stream'] + + if 'processing_parameters' not in output.keys(): + output['processing_parameters'] = {} + output['processing_parameters']['remove_noise'] = {} + for key, value in orig_args.items(): + output['processing_parameters']['remove_noise'][key] = value + + output['ProcessingStatus']['RemoveNoiseStatus'] = True + output = _check_processing_status(output, start_time=start_time, func_name=inspect.stack()[0][3], verbose=verbose) + + output = __remove_windows_from_df(output, verbose=verbose) + + #if 'hvsr_windows_df' in output.keys() or ('params' in output.keys() and 'hvsr_windows_df' in output['params'].keys())or ('input_params' in output.keys() and 'hvsr_windows_df' in output['input_params'].keys()): + # hvsrDF = output['hvsr_windows_df'] + # + # outStream = output['stream_edited'].split() + # for i, trace in enumerate(outStream): + # if i == 0: + # trEndTime = trace.stats.endtime + # comp_end = trace.stats.component + # continue + # trStartTime = trace.stats.starttime + # comp_start = trace.stats.component + + # if trEndTime < trStartTime and comp_end == comp_start: + # gap = [trEndTime,trStartTime] + + # output['hvsr_windows_df']['Use'] = (hvsrDF['TimesProcessed_Obspy'].gt(gap[0]) & hvsrDF['TimesProcessed_Obspy'].gt(gap[1]) )| \ + # (hvsrDF['TimesProcessed_ObspyEnd'].lt(gap[0]) & hvsrDF['TimesProcessed_ObspyEnd'].lt(gap[1]))# | \ + # output['hvsr_windows_df']['Use'] = output['hvsr_windows_df']['Use'].astype(bool) + # + # trEndTime = trace.stats.endtime + # + # outStream.merge() + # output['stream_edited'] = outStream + + elif isinstance(hvsr_data, obspy.Stream) or isinstance(hvsr_data, obspy.Trace): + output = outStream + else: + warnings.warn(f"Output of type {type(output)} for this function will likely result in errors in other processing steps. Returning hvsr_data data.") + return hvsr_data + output = sprit_utils.make_it_classy(output) + if 'x_windows_out' not in output.keys(): + output['x_windows_out'] = [] + + return output
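A usage sketch for the method selection described above (hvsr_data stands in for an object returned by earlier processing steps; threshold values are illustrative):

# Run all four automatic methods in sequence
hvsr_data = remove_noise(hvsr_data, remove_method='auto')

# Run only selected methods, with explicit thresholds
hvsr_data = remove_noise(hvsr_data, remove_method=['antitrigger', 'saturation threshold'],
                         sta=2, lta=30, stalta_thresh=[8, 16], sat_percent=0.995)

# Passing a non-default parameter implicitly adds its corresponding method,
# so this also runs the warmup/cooldown method even though remove_method is not set
hvsr_data = remove_noise(hvsr_data, warmup_time=60)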
+ + + +# Remove outlier ppsds +
+[docs] +def remove_outlier_curves(hvsr_data, rmse_thresh=98, use_percentile=True, use_hv_curve=False, plot_engine='matplotlib', show_plot=False, verbose=False): + """Function used to remove outlier curves, using Root Mean Square Error (RMSE) to calculate the error of each windowed + Probabilistic Power Spectral Density (PPSD) curve against the median PPSD value at each frequency step for all times. + It calculates the RMSE for the PPSD curves of each component individually; windows whose curve exceeds the threshold on any component are removed from analysis (a distilled numpy sketch of this test follows the function). + + Some aberrant curves often occur due to the remove_noise() function, so this should be run some time after remove_noise(). + In general, the recommended workflow is to run this immediately following the generate_ppsds() function. + + Parameters + ---------- + hvsr_data : dict + Input dictionary containing all the values and parameters of interest + rmse_thresh : float or int, default=98 + The Root Mean Square Error value to use as a threshold for determining whether a curve is an outlier. + The RMSE is averaged over each entire curve, so that curves with very aberrant data (which often occur when using the remove_noise() method) can be identified. + If use_percentile is False, this is used directly as the cutoff RMSE value (all curves with RMSE above it will be removed). + use_percentile : bool, default=True + Whether rmse_thresh should be interpreted as a percentile of the RMSE values (True) or as a raw RMSE value (False). + use_hv_curve : bool, default=False + Whether to use the calculated HV Curve or the individual components. This can only be True after process_hvsr() has been run. + plot_engine : str, default='matplotlib' + Which plotting library to use for the outlier plot: 'matplotlib' or 'plotly'. + show_plot : bool, default=False + Whether to show a plot of the removed data + verbose : bool, default=False + Whether to print output of function to terminal + + Returns + ------- + hvsr_data : dict + Input dictionary, with the 'Use' column of hvsr_windows_df updated so that outlier windows are excluded from further analysis.
+ """ + # Setup function + #Get intput paramaters + orig_args = locals().copy() + start_time = datetime.datetime.now() + + # Update with processing parameters specified previously in input_params, if applicable + if 'processing_parameters' in hvsr_data.keys(): + if 'remove_outlier_curves' in hvsr_data['processing_parameters'].keys() and 'remove_noise' in hvsr_data['processing_parameters'].keys(): + update_msg = [] + for k, v in hvsr_data['processing_parameters']['remove_noise'].items(): + defaultVDict = dict(zip(inspect.getfullargspec(remove_outlier_curves).args[1:], + inspect.getfullargspec(remove_outlier_curves).defaults)) + # Manual input to function overrides the imported parameter values + if (not isinstance(v, (HVSRData, HVSRBatch))) and (k in orig_args.keys()) and (orig_args[k]==defaultVDict[k]): + update_msg.append(f'\t\t{k} = {v} (previously {orig_args[k]})') + orig_args[k] = v + + # Reset parameters in case of manual override of imported parameters + use_percentile = orig_args['use_percentile'] + rmse_thresh = orig_args['rmse_thresh'] + use_hv_curve = orig_args['use_hv_curve'] + show_plot = orig_args['show_plot'] + verbose = orig_args['verbose'] + + #Print if verbose, which changes depending on if batch data or not + if (verbose and isinstance(hvsr_data, HVSRBatch)) or (verbose and not hvsr_data['batch']): + if isinstance(hvsr_data, HVSRData) and hvsr_data['batch']: + pass + else: + print('\nRemoving outlier curves from further analysis (remove_outlier_curves())') + print('\tUsing the following parameters:') + for key, value in orig_args.items(): + if key == 'hvsr_data': + pass + else: + print('\t {}={}'.format(key, value)) + print() + + if 'processing_parameters' in hvsr_data.keys() and 'remove_outlier_curves' in hvsr_data['processing_parameters'].keys(): + if update_msg != []: + update_msg.insert(0, '\tThe following parameters were updated using the processing_parameters attribute:') + for msg_line in update_msg: + print(msg_line) + print() + + #First, divide up for batch or not + #Site is in the keys anytime it's not batch + if isinstance(hvsr_data, HVSRBatch): + #If running batch, we'll loop through each site + hvsr_out = {} + for site_name in hvsr_data.keys(): + args = orig_args.copy() #Make a copy so we don't accidentally overwrite + args['hvsr_data'] = hvsr_data[site_name] #Get what would normally be the "hvsr_data" variable for each site + if hvsr_data[site_name]['ProcessingStatus']['OverallStatus']: + try: + hvsr_out[site_name] = __remove_outlier_curves(**args) #Call another function, that lets us run this function again + except: + hvsr_out = hvsr_data + hvsr_out[site_name]['ProcessingStatus']['RemoveOutlierCurves'] = False + hvsr_out[site_name]['ProcessingStatus']['OverallStatus'] = False + else: + hvsr_out = hvsr_data + hvsr_out[site_name]['ProcessingStatus']['RemoveOutlierCurves'] = False + hvsr_out[site_name]['ProcessingStatus']['OverallStatus'] = False + hvsr_out = HVSRBatch(hvsr_out) + hvsr_out = _check_processing_status(hvsr_out, start_time=start_time, func_name=inspect.stack()[0][3], verbose=verbose) + return hvsr_out + + #Create plot if designated + if not use_hv_curve: + compNames = ['Z', 'E', 'N'] + for col_name in hvsr_data['hvsr_windows_df'].columns: + if 'psd_values' in col_name and 'RMSE' not in col_name: + cName = col_name.split('_')[2] + if cName not in compNames: + compNames.append(cName) + col_prefix = 'psd_values_' + colNames = [col_prefix+cn for cn in compNames] + else: + compNames = [] + for col_name in hvsr_data['hvsr_windows_df'].columns: + if 
col_name.startswith('HV_Curves') and "Log10" not in col_name: + compNames.append(col_name) + colNames = compNames + col_prefix = 'HV_Curves' + if plot_engine.lower() == 'matplotlib': + spMosaic = [] + if use_hv_curve: + spMosaic.append(['HV Curve']) + fSize=(8.5, 6) + else: + for c in compNames: + spMosaic.append([c]) + fSize = (8.5, len(compNames) * 2) + + fig, ax = plt.subplot_mosaic(spMosaic, sharex=True, figsize=fSize) + fig.suptitle(f"{hvsr_data['site']}\nOutlier Curves to be Removed") + fig.set_layout_engine('constrained') + + # Loop through each component, and determine which curves are outliers + bad_rmse=[] + for i, column in enumerate(colNames): + if column in compNames: + if use_hv_curve == False: + column = col_prefix+column + else: + column = column + + # Retrieve data from dataframe (use all windows, just in case) + curr_data = np.stack(hvsr_data['hvsr_windows_df'][column]) + + # Calculate a median curve, and reshape so same size as original + medCurve = np.nanmedian(curr_data, axis=0) + medCurveArr = np.tile(medCurve, (curr_data.shape[0], 1)) + + # Calculate RMSE + rmse = np.sqrt(((np.subtract(curr_data, medCurveArr)**2).sum(axis=1))/curr_data.shape[1]) + hvsr_data['hvsr_windows_df']['RMSE_'+column] = rmse + if use_percentile is True: + rmse_threshold = np.percentile(rmse[~np.isnan(rmse)], rmse_thresh) + if verbose: + print(f'\tRMSE at {rmse_thresh}th percentile for {column} calculated at: {rmse_threshold:.2f}') + else: + rmse_threshold = rmse_thresh + + # Retrieve index of those RMSE values that lie outside the threshold + for j, curve in enumerate(curr_data): + if rmse[j] > rmse_threshold: + bad_rmse.append(j) + + # Show plot of removed/retained data + if use_hv_curve == False: + if plot_engine.lower() == 'matplotlib': + # Intialize to only get unique labels + rem_label_got = False + keep_label_got = False + + # Iterate through each curve to determine if it's rmse is outside threshold, for plot + for j, curve in enumerate(curr_data): + label=None + if rmse[j] > rmse_threshold: + linestyle = 'dashed' + linecolor='darkred' + alpha = 1 + linewidth = 1 + if not rem_label_got: + label='Removed Curve' + rem_label_got=True + else: + linestyle='solid' + linecolor = 'rosybrown' + alpha = 0.25 + linewidth=0.5 + if not keep_label_got: + keep_label_got=True + label='Retained Curve' + + # Plot each individual curve + if 'x_freqs' in hvsr_data.keys(): + ax[compNames[i]].plot(hvsr_data.x_freqs[compNames[i]], curve, linewidth=linewidth, c=linecolor, linestyle=linestyle, alpha=alpha, label=label) + else: + ax[compNames[i]].plot(1/hvsr_data.ppsds[compNames[i]]['period_bin_centers'], curve, linewidth=linewidth, c=linecolor, linestyle=linestyle, alpha=alpha, label=label) + + # Plot the median curve + if 'x_freqs' in hvsr_data.keys(): + ax[compNames[i]].plot(hvsr_data.x_freqs[compNames[i]], medCurve, linewidth=1, color='k', label='Median Curve') + else: + ax[compNames[i]].plot(1/hvsr_data.ppsds[compNames[i]]['period_bin_centers'],medCurve, linewidth=1, color='k', label='Median Curve') + + # Format axis + ax[compNames[i]].set_ylabel(f"{compNames[i]}") + ax[compNames[i]].legend(fontsize=10, labelspacing=0.1) + ax[compNames[i]].semilogx() + + if plot_engine.lower() == 'matplotlib': + hvsr_data['OutlierPlot'] = fig + if show_plot: + plt.show() + else: + plt.close() + elif plot_engine.lower() == 'plotly': + hvsr_data['OutlierPlot'] = sprit_plot.plot_outlier_curves(hvsr_data, rmse_thresh=rmse_thresh, use_percentile=use_percentile, use_hv_curve=use_hv_curve, from_roc=True, show_plot=show_plot, 
verbose=verbose) + else: + pass + + # Get unique values of bad_rmse indices and set the "Use" column of the hvsr_windows_df to False for that window + bad_rmse = np.unique(bad_rmse) + if len(bad_rmse) > 0: + + hvsr_data['hvsr_windows_df']['Use'] = hvsr_data['hvsr_windows_df']['Use'] * (rmse_threshold > hvsr_data['hvsr_windows_df']['RMSE_'+column]) + #hvsr_data['hvsr_windows_df'].loc[bad_index, "Use"] = False + + if verbose: + if len(bad_rmse)>0: + print(f"\n\t\tThe windows starting at the following times have been removed from further analysis ({len(bad_rmse)}/{hvsr_data['hvsr_windows_df'].shape[0]}):") + for b in hvsr_data['hvsr_windows_df'].index[pd.Series(bad_rmse)]: + print(f"\t\t {b}") + else: + print('\tNo outlier curves have been removed') + + hvsr_out = hvsr_data + + if 'processing_parameters' not in hvsr_out.keys(): + hvsr_out['processing_parameters'] = {} + hvsr_out['processing_parameters']['remove_outlier_curves'] = {} + for key, value in orig_args.items(): + hvsr_out['processing_parameters']['remove_outlier_curves'][key] = value + + hvsr_data['ProcessingStatus']['RemoveOutlierCurvesStatus'] = True + + hvsr_out = _check_processing_status(hvsr_out, start_time=start_time, func_name=inspect.stack()[0][3], verbose=verbose) + + return hvsr_out
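The outlier test above boils down to a per-window RMSE against the median curve; a self-contained numpy sketch of the same computation (synthetic data, default thresholds):

import numpy as np

rng = np.random.default_rng(0)
curves = rng.normal(0, 1, (30, 100))  # 30 time windows x 100 frequency steps
curves[4] += 8                        # make one window clearly aberrant

medCurve = np.nanmedian(curves, axis=0)
rmse = np.sqrt(np.nanmean((curves - medCurve)**2, axis=1))  # same as the sum/n form above

rmse_threshold = np.percentile(rmse, 98)  # use_percentile=True, rmse_thresh=98
keep = rmse <= rmse_threshold             # analogous to updating the 'Use' column
print(np.where(~keep)[0])                 # [4]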
+ + + +# Just for testing +
+[docs] +def test_function(): + print('is this working?')
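The batch helpers that follow all share one wrapper pattern: call the corresponding public function, print a per-site status line if verbose, and fall back to the unmodified input object on failure so one bad site does not stop a batch run. A distilled sketch of that pattern (_batch_wrap is a hypothetical helper; the real wrappers differ in their per-function verbose reporting):

import warnings

def _batch_wrap(func, data_key, **kwargs):
    try:
        return func(**kwargs)
    except Exception:
        warnings.warn(f'Error in {func.__name__}()', RuntimeWarning)
        return kwargs[data_key]  # return the input object unchanged

# e.g., result = _batch_wrap(process_hvsr, 'hvsr_data', hvsr_data=site_data, verbose=True)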
+
+
+
+# BATCH FUNCTIONS: various functions that are used to help the regular functions handle batch data
+# Helper function for batch processing of check_peaks
+def __check_peaks_batch(**check_peaks_kwargs):
+    try:
+        hvsr_data = check_peaks(**check_peaks_kwargs)
+        if check_peaks_kwargs['verbose']:
+            print('\t{} successfully completed check_peaks()'.format(hvsr_data['input_params']['site']))
+    except:
+        warnings.warn(f"Error in check_peaks({check_peaks_kwargs['hvsr_data']['input_params']['site']}, **check_peaks_kwargs)", RuntimeWarning)
+        hvsr_data = check_peaks_kwargs['hvsr_data']
+
+    return hvsr_data
+
+
+# Helper function for batch processing of generate_ppsds
+def __generate_ppsds_batch(**generate_ppsds_kwargs):
+    try:
+        params = generate_ppsds(**generate_ppsds_kwargs)
+        if generate_ppsds_kwargs['verbose']:
+            print('\t{} successfully completed generate_ppsds()'.format(params['site']))
+    except Exception as e:
+        print(e)
+        warnings.warn(f"Error in generate_ppsds({generate_ppsds_kwargs['params']['site']}, **generate_ppsds_kwargs)", RuntimeWarning)
+        params = generate_ppsds_kwargs['params']
+
+    return params
+
+
+# Helper function for batch processing of get_report
+def __get_report_batch(**get_report_kwargs):
+
+    try:
+        hvsr_results = get_report(**get_report_kwargs)
+        # If verbose, also print the report even when 'print' was not among the selected report_formats
+        print('\n\n\n')  # add some 'whitespace'
+        if get_report_kwargs['verbose']:
+            if 'print' in get_report_kwargs['report_formats']:
+                pass
+            else:
+                get_report_kwargs['report_formats'] = 'print'
+                get_report(**get_report_kwargs)
+
+    except:
+        warnMsg = f"Error in get_report({get_report_kwargs['hvsr_results']['input_params']['site']}, **get_report_kwargs)"
+        if get_report_kwargs['verbose']:
+            print('\t'+warnMsg)
+        else:
+            warnings.warn(warnMsg, RuntimeWarning)
+        hvsr_results = get_report_kwargs['hvsr_results']
+
+    return hvsr_results
+
+
+# Helper function for batch processing of calculate_azimuth
+def __azimuth_batch(**azimuth_kwargs):
+    try:
+        hvsr_data = calculate_azimuth(**azimuth_kwargs)
+
+        if azimuth_kwargs['verbose']:
+            if 'input_params' in hvsr_data.keys():
+                print('\t{} successfully completed calculate_azimuth()'.format(hvsr_data['input_params']['site']))
+            elif 'site' in hvsr_data.keys():
+                print('\t{} successfully completed calculate_azimuth()'.format(hvsr_data['site']))
+    except Exception as e:
+        warnings.warn(f"Error in calculate_azimuth({azimuth_kwargs['input']['site']}, **azimuth_kwargs)", RuntimeWarning)
+        hvsr_data = azimuth_kwargs['input']  # Fall back to the unmodified input so hvsr_data is defined and the batch can continue
+
+    return hvsr_data
+
+
+# Helper function for batch processing of remove_noise
+def __remove_noise_batch(**remove_noise_kwargs):
+    try:
+        hvsr_data = remove_noise(**remove_noise_kwargs)
+
+        if remove_noise_kwargs['verbose']:
+            if 'input_params' in hvsr_data.keys():
+                print('\t{} successfully completed remove_noise()'.format(hvsr_data['input_params']['site']))
+            elif 'site' in hvsr_data.keys():
+                print('\t{} successfully completed remove_noise()'.format(hvsr_data['site']))
+    except Exception as e:
+        warnings.warn(f"Error in remove_noise({remove_noise_kwargs['input']['site']}, **remove_noise_kwargs)", RuntimeWarning)
+        hvsr_data = remove_noise_kwargs['input']  # Fall back to the unmodified input so hvsr_data is defined and the batch can continue
+
+    return hvsr_data
+
+
+# Helper function for batch processing of remove_outlier_curves
+def __remove_outlier_curves(**remove_outlier_curves_kwargs):
+    try:
+        hvsr_data = remove_outlier_curves(**remove_outlier_curves_kwargs)
+
+        if remove_outlier_curves_kwargs['verbose']:
+            if 'input_params' in hvsr_data.keys():
+                print('\t{} successfully completed remove_outlier_curves()'.format(hvsr_data['input_params']['site']))
+            elif 'site' in hvsr_data.keys():
+                print('\t{} successfully completed remove_outlier_curves()'.format(hvsr_data['site']))
+    except Exception as e:
+        warnings.warn(f"Error in remove_outlier_curves({remove_outlier_curves_kwargs['input']['site']}, **remove_outlier_curves_kwargs)", RuntimeWarning)
+        hvsr_data = remove_outlier_curves_kwargs['input']  # Fall back to the unmodified input so hvsr_data is defined and the batch can continue
+
+    return hvsr_data
+
+
+# Batch function for plot_hvsr()
+def __hvsr_plot_batch(**hvsr_plot_kwargs):
+    try:
+        hvsr_data = plot_hvsr(**hvsr_plot_kwargs)
+    except:
+        warnings.warn(f"Error in plotting ({hvsr_plot_kwargs['hvsr_data']['input_params']['site']}, **hvsr_plot_kwargs)", RuntimeWarning)
+        hvsr_data = hvsr_plot_kwargs['hvsr_data']
+
+    return hvsr_data
+
+
+# Helper function for batch processing of plot_azimuth()
+def __plot_azimuth_batch(**plot_azimuth_kwargs):
+    # Start from the input parameters so hvsr_data is always defined, even if plotting fails
+    hvsr_data = plot_azimuth_kwargs['params']
+    try:
+        hvsr_data['Azimuth_Fig'] = plot_azimuth(**plot_azimuth_kwargs)
+        if plot_azimuth_kwargs['verbose']:
+            print('\t{} successfully completed plot_azimuth()'.format(hvsr_data['input_params']['site']))
+    except:
+        errMsg = f"Error in plot_azimuth({plot_azimuth_kwargs['params']['site']}, **plot_azimuth_kwargs)"
+        if plot_azimuth_kwargs['verbose']:
+            print('\t'+errMsg)
+        else:
+            warnings.warn(errMsg, RuntimeWarning)
+
+    return hvsr_data
+
+
+# Helper function for batch version of process_hvsr()
+def __process_hvsr_batch(**process_hvsr_kwargs):
+    try:
+        hvsr_data = process_hvsr(**process_hvsr_kwargs)
+        if process_hvsr_kwargs['verbose']:
+            print('\t{} successfully completed process_hvsr()'.format(hvsr_data['input_params']['site']))
+    except:
+        errMsg = f"Error in process_hvsr({process_hvsr_kwargs['params']['site']}, **process_hvsr_kwargs)"
+        if process_hvsr_kwargs['verbose']:
+            print('\t'+errMsg)
+        else:
+            warnings.warn(errMsg, RuntimeWarning)
+        hvsr_data = process_hvsr_kwargs['params']
+
+    return hvsr_data
+
+
+# OTHER HELPER FUNCTIONS
+# Special helper function that checks the processing status at each stage of processing to help determine if any processing steps were skipped
+def _check_processing_status(hvsr_data, start_time=None, func_name='', verbose=False):
+    """Internal function to check processing status, used primarily in the sprit.run() function to allow processing to continue if one site is bad.
+
+    Parameters
+    ----------
+    hvsr_data : sprit.HVSRData
+        Data being processed
+    start_time : datetime.datetime, optional
+        Time at which the calling function started, used to report elapsed time when verbose=True. If None, the current time is used.
+    func_name : str, default=''
+        Name of the calling function, used in the verbose printout.
+    verbose : bool, default=False
+        Whether to print the elapsed time of the calling function.
+
+    Returns
+    -------
+    sprit.HVSRData
+        Data being processed, with the 'OverallStatus' key of the ProcessingStatus attribute updated.
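+
+    Notes
+    -----
+    Only required steps count against the overall status; the optional steps
+    'RemoveNoiseStatus' and 'RemoveOutlierCurvesStatus' are ignored. An
+    illustrative example (the status key names shown here are hypothetical,
+    not from a real run):
+
+        >>> hvsr_data['ProcessingStatus'] = {'FetchDataStatus': True,
+        ...                                  'RemoveNoiseStatus': False,
+        ...                                  'PSDStatus': True}
+        >>> _check_processing_status(hvsr_data)['ProcessingStatus']['OverallStatus']
+        True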
+ """ + #Convert HVSRData to same format as HVSRBatch so same code works the same on both + if isinstance(hvsr_data, HVSRData): + siteName = hvsr_data['site'] + hvsr_interim = {siteName: hvsr_data} + else: + hvsr_interim = hvsr_data + + # Check overall processing status on all (or only 1 if HVSRData) site(s) + for sitename in hvsr_interim.keys(): + statusOK = True + for status_type, status_value in hvsr_interim[sitename]['ProcessingStatus'].items(): + if not status_value and (status_type != 'RemoveNoiseStatus' and status_type!='RemoveOutlierCurvesStatus'): + statusOK = False + + if statusOK: + hvsr_interim[sitename]['ProcessingStatus']['OverallStatus'] = True + else: + hvsr_interim[sitename]['ProcessingStatus']['OverallStatus'] = False + + # Get back original data in HVSRData format, if that was the input + if isinstance(hvsr_data, HVSRData): + hvsr_data = hvsr_interim[siteName] + + # Print how long it took to perform function + if verbose: + elapsed = (datetime.datetime.now()-start_time) + print(f"\t\t{func_name} completed in {str(elapsed)[:-3]}") + return hvsr_data + + +# HELPER functions for fetch_data() and get_metadata() +# Read in metadata .inv file, specifically for RaspShake +def _update_shake_metadata(filepath, params, write_path=''): + """Reads static metadata file provided for Rasp Shake and updates with input parameters. Used primarily in the get_metadata() function. + + PARAMETERS + ---------- + filepath : str or pathlib.Path object + Filepath to metadata file. Should be a file format supported by obspy.read_inventory(). + params : dict + Dictionary containing necessary keys/values for updating, currently only supported for STATIONXML with Raspberry Shakes. + Necessary keys: 'net', 'sta', + Optional keys: 'longitude', 'latitude', 'elevation', 'depth' + write_path : str, default='' + If specified, filepath to write to updated inventory file to. 
+ + Returns + ------- + params : dict + Updated params dict with new key:value pair with updated updated obspy.inventory object (key="inv") + """ + + network = params['net'] + station = params['sta'] + optKeys = ['longitude', 'latitude', 'elevation', 'depth'] + for k in optKeys: + if k not in params.keys(): + params[k] = '0' + + wgs84_transformer = Transformer.from_crs(params['input_crs'], "4326") + + xcoord = str(params['longitude']) + ycoord = str(params['latitude']) + elevation = str(params['elevation']) + depth = str(params['depth']) + + + startdate = str(datetime.datetime(year=2023, month=2, day=15)) #First day sprit code worked :) + enddate=str(datetime.datetime.today()) + + filepath = sprit_utils.checkifpath(filepath) + tree = ET.parse(str(filepath)) + root = tree.getroot() + + prefix= "{http://www.fdsn.org/xml/station/1}" + + for item in root.iter(prefix+'Channel'): + item.attrib['startDate'] = startdate + item.attrib['endDate'] = enddate + + for item in root.iter(prefix+'Station'): + item.attrib['code'] = station + item.attrib['startDate'] = startdate + item.attrib['endDate'] = enddate + + for item in root.iter(prefix+'Network'): + item.attrib['code'] = network + + for item in root.iter(prefix+'Latitude'): + item.text = ycoord + + for item in root.iter(prefix+'Longitude'): + item.text = xcoord + + for item in root.iter(prefix+'Created'): + nowTime = str(datetime.datetime.now()) + item.text = nowTime + + for item in root.iter(prefix+'Elevation'): + item.text= elevation + + for item in root.iter(prefix+'Depth'): + item.text=depth + + #Set up (and) export + #filetag = '_'+str(datetime.datetime.today().date()) + #outfile = str(parentPath)+'\\'+filename+filetag+'.inv' + + if write_path != '': + try: + write_path = pathlib.Path(write_path) + if write_path.is_dir(): + fname = params['network']+'_'+params['station']+'_'+params['site'] + fname = fname + '_response.xml' + write_file = write_path.joinpath(fname) + else: + write_file=write_path + tree.write(write_file, xml_declaration=True, method='xml',encoding='UTF-8') + inv = obspy.read_inventory(write_file, format='STATIONXML', level='response') + except: + warnings.warn(f'write_path={write_path} is not recognized as a filepath, updated metadata file will not be written') + write_path='' + else: + try: + #Create temporary file for reading into obspy + tpf = tempfile.NamedTemporaryFile(delete=False) + stringRoot = ET.tostring(root, encoding='UTF-8', method='xml') + tpf.write(stringRoot) + + inv = obspy.read_inventory(tpf.name, format='STATIONXML', level='response') + tpf.close() + + os.remove(tpf.name) + except: + write_file = pathlib.Path(__file__).with_name('metadata.xml') + tree.write(write_file, xml_declaration=True, method='xml',encoding='UTF-8') + inv = obspy.read_inventory(write_file.as_posix(), format='STATIONXML', level='response') + os.remove(write_file.as_posix()) + + params['inv'] = inv + params['params']['inv'] = inv + return params + + +# Support function for get_metadata() +def _read_RS_Metadata(params, source=None): + """Function to read the metadata from Raspberry Shake using the StationXML file provided by the company. + Intended to be used within the get_metadata() function. 
+ + Parameters + ---------- + params : dict + The parameter dictionary output from input_params() and read into get_metadata() + + Returns + ------- + params : dict + Further modified parameter dictionary + """ + if 'inv' in params.keys(): + inv = params['inv'] + else: + sprit_utils.checkifpath(params['metapath']) + inv = obspy.read_inventory(params['metapath'], format='STATIONXML', level='response') + params['inv'] = inv + + station = params['sta'] + network = params['net'] + channels = params['cha'] + + if isinstance(inv, obspy.core.inventory.inventory.Inventory): + #Create temporary file from inventory object + tpf = tempfile.NamedTemporaryFile(delete=False) + inv.write(tpf.name, format='STATIONXML') + + #Read data into xmlTree + tree = ET.parse(tpf.name) + root = tree.getroot() + + #Close and remove temporary file + tpf.close() + os.remove(tpf.name) + else: + inv = sprit_utils.checkifpath(inv) + inv = obspy.read_inventory(params['metapath'], format='STATIONXML', level='response') + params['inv'] = inv + tree = ET.parse(inv) + root = tree.getroot() + + #if write_path != '': + # inv.write(write_path, format='STATIONXML') + + #This is specific to RaspShake + c=channels[0] + pzList = [str(n) for n in list(range(7))] + s=pzList[0] + + prefix= "{http://www.fdsn.org/xml/station/1}" + + sensitivityPath = "./"+prefix+"Network[@code='"+network+"']/"+prefix+"Station[@code='"+station+"']/"+prefix+"Channel[@code='"+c+"']/"+prefix+"Response/"+prefix+"InstrumentSensitivity/"+prefix+"Value" + gainPath = "./"+prefix+"Network[@code='"+network+"']/"+prefix+"Station[@code='"+station+"']/"+prefix+"Channel[@code='"+c+"']/"+prefix+"Response/"+prefix+"Stage[@number='1']/"+prefix+"StageGain/"+prefix+"Value" + + #paz = [] + rsCList = ['EHZ', 'EHN', 'EHE'] + paz = {} + for c in channels: + channelPaz = {} + #channelPaz['channel'] = c + for item in root.findall(sensitivityPath): + channelPaz['sensitivity']=float(item.text) + + for item in root.findall(gainPath): + channelPaz['gain']=float(item.text) + + poleList = [] + zeroList = [] + for s in pzList: + if int(s) < 4: + polePathReal = "./"+prefix+"Network[@code='"+network+"']/"+prefix+"Station[@code='"+station+"']/"+prefix+"Channel[@code='"+c+"']/"+prefix+"Response/"+prefix+"Stage[@number='1']/"+prefix+"PolesZeros/"+prefix+"Pole[@number='"+s+"']/"+prefix+"Real" + polePathImag = "./"+prefix+"Network[@code='"+network+"']/"+prefix+"Station[@code='"+station+"']/"+prefix+"Channel[@code='"+c+"']/"+prefix+"Response/"+prefix+"Stage[@number='1']/"+prefix+"PolesZeros/"+prefix+"Pole[@number='"+s+"']/"+prefix+"Imaginary" + for poleItem in root.findall(polePathReal): + poleReal = poleItem.text + for poleItem in root.findall(polePathImag): + pole = complex(float(poleReal), float(poleItem.text)) + poleList.append(pole) + channelPaz['poles'] = poleList + #channelPaz['poles'] = list(set(poleList)) + else: + zeroPathReal = "./"+prefix+"Network[@code='"+network+"']/"+prefix+"Station[@code='"+station+"']/"+prefix+"Channel[@code='"+c+"']/"+prefix+"Response/"+prefix+"Stage[@number='1']/"+prefix+"PolesZeros/"+prefix+"Zero[@number='"+s+"']/"+prefix+"Real" + zeroPathImag = "./"+prefix+"Network[@code='"+network+"']/"+prefix+"Station[@code='"+station+"']/"+prefix+"Channel[@code='"+c+"']/"+prefix+"Response/"+prefix+"Stage[@number='1']/"+prefix+"PolesZeros/"+prefix+"Zero[@number='"+s+"']/"+prefix+"Imaginary" + for zeroItem in root.findall(zeroPathReal): + zeroReal = zeroItem.text + + for zeroItem in root.findall(zeroPathImag): + zero = complex(float(zeroReal), float(zeroItem.text)) + #zero = 
zeroReal + "+" + zeroItem.text+'j' + zeroList.append(zero) + #channelPaz['zeros'] = list(set(zeroList)) + channelPaz['zeros'] = zeroList + if str(c).upper() in rsCList: + c = str(c)[-1].upper() + paz[str(c)] = channelPaz + params['paz'] = paz + params['params']['paz'] = paz + + return params + + +# Helper function to sort channels +def _sort_channels(input, source, verbose): + if source!='batch': + input = {'SITENAME': {'stream':input}} #Make same structure as batch + + for site in input.keys(): + rawDataIN = input[site]['stream'] + + if rawDataIN is None: + if verbose: + raise RuntimeError("No data was read using specified parameters {}".format(input[site])) + else: + raise RuntimeError("No data was read using specified parameters") + + elif isinstance(rawDataIN, obspy.core.stream.Stream): + #Make sure z component is first + dataIN = rawDataIN.sort(['channel'], reverse=True) #z, n, e order + else: + #Not usually used anymore, retained just in case + dataIN = [] + for i, st in enumerate(rawDataIN): + if 'Z' in st[0].stats['channel']:#).split('.')[3]:#[12:15]: + dataIN.append(rawDataIN[i]) + else: + dataIN.append(rawDataIN[i].sort(['channel'], reverse=True)) #z, n, e order + + input[site]['stream'] = dataIN + + if source=='batch': + #Return a dict + output = input + else: + #Return a stream otherwise + output = input[site]['stream'] + return output + + +# Trim data +def _trim_data(input, stream=None, export_dir=None, data_export_format=None, source=None, **kwargs): + """Function to trim data to start and end time + + Trim data to start and end times so that stream being analyzed only contains wanted data. + Can also export data to specified directory using a specified site name and/or data_export_format + + Parameters + ---------- + input : HVSRData + HVSR Data class containing input parameters for trimming + stream : obspy.stream object + Obspy stream to be trimmed + export_dir: str or pathlib obj + Output filepath to export trimmed data to. If not specified, does not export. + data_export_format : str or None, default=None + If None, and export_dir is specified, format defaults to .mseed. Otherwise, exports trimmed stream using obspy.core.stream.Stream.write() method, with data_export_format being passed to the format argument. + https://docs.obspy.org/packages/autogen/obspy.core.stream.Stream.write.html#obspy.core.stream.Stream.write + **kwargs + Keyword arguments passed directly to obspy.core.stream.Stream.trim() method. + + Returns + ------- + st_trimmed : obspy.stream object + Obpsy Stream trimmed to start and end times + """ + #if source!='batch': + # #input = {'SITENAME': {'stream':input}} #Make same structure as batch + # pass + + if 'starttime' in kwargs.keys(): + start = kwargs['starttime'] + elif isinstance(input, (HVSRData, dict)): + start = input['starttime'] + + if 'endtime' in kwargs.keys(): + end = kwargs['endtime'] + else: + end = input['endtime'] + + if 'site' in kwargs.keys(): + site = kwargs['site'] + else: + site = input['site'] + + if stream is not None: + st_trimmed = stream.copy() + elif 'stream' in input.keys(): + st_trimmed = input['stream'].copy() + else: + raise UnboundLocalError("stream not specified. 
Must either be specified using stream parameter or as a key in the input parameters (input['stream'])") + + trimStart = obspy.UTCDateTime(start) + trimEnd = obspy.UTCDateTime(end) + + #If data is contained in a masked array, split to undo masked array + if isinstance(st_trimmed[0].data, np.ma.masked_array): + st_trimmed = st_trimmed.split() + #This split is undone with the .merge() method a few lines down + + for tr in st_trimmed: + if trimStart > tr.stats.endtime or trimEnd < tr.stats.starttime: + pass + else: + st_trimmed.trim(starttime=trimStart, endtime=trimEnd, **kwargs) + + st_trimmed.merge(method=1) + + if data_export_format is None: + data_export_format = '.mseed' + + #Format export filepath, if exporting + if export_dir is not None: + if site is None: + site='' + else: + site = site+'_' + if '.' not in data_export_format: + data_export_format = '.'+data_export_format + net = st_trimmed[0].stats.network + sta = st_trimmed[0].stats.station + loc = st_trimmed[0].stats.location + yr = str(st_trimmed[0].stats.starttime.year) + strtD=str(st_trimmed[0].stats.starttime.date) + strtT=str(st_trimmed[0].stats.starttime.time)[0:2] + strtT=strtT+str(st_trimmed[0].stats.starttime.time)[3:5] + endT = str(st_trimmed[0].stats.endtime.time)[0:2] + endT = endT+str(st_trimmed[0].stats.endtime.time)[3:5] + doy = str(st_trimmed[0].stats.starttime.utctimetuple().tm_yday).zfill(3) + + export_dir = sprit_utils.checkifpath(export_dir) + export_dir = str(export_dir) + export_dir = export_dir.replace('\\', '/') + export_dir = export_dir.replace('\\'[0], '/') + + if type(data_export_format) is str: + filename = site+net+'.'+sta+'.'+loc+'.'+yr+'.'+doy+'_'+strtD+'_'+strtT+'-'+endT+data_export_format + elif type(data_export_format) is bool: + filename = site+net+'.'+sta+'.'+loc+'.'+yr+'.'+doy+'_'+strtD+'_'+strtT+'-'+endT+'.mseed' + + if export_dir[-1]=='/': + export_dir=export_dir[:-1] + + exportFile = export_dir+'/'+filename + + #Take care of masked arrays for writing purposes + if 'fill_value' in kwargs.keys(): + for tr in st_trimmed: + if isinstance(tr.data, np.ma.masked_array): + tr.data = tr.data.filled(kwargs['fill_value']) + else: + st_trimmed = st_trimmed.split() + + st_trimmed.write(filename=exportFile) + else: + pass + + return st_trimmed + + +# Helper function to detrend data +def __detrend_data(input, detrend, detrend_order, verbose, source): + """Helper function to detrend data, specifically formatted for the HVSRData and HVSRBatch objects""" + if source != 'batch': + input = {'SITENAME': {'stream':input}} #Make same structure as batch + + for key in input.keys(): + dataIN = input[key]['stream'] + if detrend==False: + pass + elif detrend==True: + #By default, do a spline removal + for tr in dataIN: + tr.detrend(type='spline', order=detrend_order, dspline=1000) + else: + data_undetrended = dataIN.copy() + try: + if str(detrend).lower()=='simple': + for tr in dataIN: + tr.detrend(type=detrend) + if str(detrend).lower()=='linear': + for tr in dataIN: + tr.detrend(type=detrend) + if str(detrend).lower()=='constant' or detrend=='demean': + for tr in dataIN: + tr.detrend(type=detrend) + if str(detrend).lower()=='polynomial': + for tr in dataIN: + tr.detrend(type=detrend, order=detrend_order) + if str(detrend).lower()=='spline': + for tr in dataIN: + tr.detrend(type=detrend, order=int(detrend_order), dspline=1000) + except: + dataIN = data_undetrended + if verbose: + warnings.warn("Detrend error, data not detrended", UserWarning) + + input[key]['stream'] = dataIN + + if source=='batch': + #Return a dict + 
output = input + else: + #Return a stream otherwise + output = input[key]['stream'] + return output + + +# Read data from raspberry shake +def __read_RS_file_struct(input_data, source, year, doy, inv, params, verbose=False): + """"Private function used by fetch_data() to read in Raspberry Shake data""" + from obspy.core import UTCDateTime + fileList = [] + folderPathList = [] + filesinfolder = False + input_data = sprit_utils.checkifpath(input_data) + #Read RS files + if source=='raw': #raw data with individual files per trace + if input_data.is_dir(): + for child in input_data.iterdir(): + if child.is_file() and child.name.startswith('AM') and str(doy).zfill(3) in child.name and str(year) in child.name: + filesinfolder = True + folderPathList.append(input_data) + fileList.append(child) + elif child.is_dir() and child.name.startswith('EH') and not filesinfolder: + folderPathList.append(child) + for c in child.iterdir(): + if c.is_file() and c.name.startswith('AM') and c.name.endswith(str(doy).zfill(3)) and str(year) in c.name: + fileList.append(c) + + + if len(fileList) == 0: + doyList = [] + printList= [] + for j, folder in enumerate(folderPathList): + for i, file in enumerate(folder.iterdir()): + if j ==0: + doyList.append(str(year) + ' ' + str(file.name[-3:])) + printList.append(f"{datetime.datetime.strptime(doyList[i], '%Y %j').strftime('%b %d')} | Day of year: {file.name[-3:]}") + if len(printList) == 0: + warnings.warn('No files found matching Raspberry Shake data structure or files in specified directory.') + else: + warnings.warn(f'No file found for specified date: {params["acq_date"]}. The following days/files exist for specified year in this directory') + for p in printList: + print('\t',p) + return None + elif len(fileList) !=3: + warnings.warn('3 channels needed! 
{} found.'.format(len(folderPathList)), UserWarning) + else: + fileList.sort(reverse=True) # Puts z channel first + folderPathList.sort(reverse=True) + if verbose: + print('Reading files: \n\t{}\n\t{}\n\t{}'.format(fileList[0].name, fileList[1].name, fileList[2].name)) + + traceList = [] + for i, f in enumerate(fileList): + with warnings.catch_warnings(): + warnings.filterwarnings(action='ignore', message='^readMSEEDBuffer()') + st = obspy.read(str(f))#, starttime=UTCDateTime(params['starttime']), endtime=UTCDateTime(params['endtime']), nearest_sample=False) + st = st.split() + st.trim(starttime=UTCDateTime(params['starttime']), endtime=UTCDateTime(params['endtime']), nearest_sample=False) + st.merge() + tr = (st[0]) + #tr= obspy.Trace(tr.data,header=meta) + traceList.append(tr) + rawDataIN = obspy.Stream(traceList) + with warnings.catch_warnings(): + warnings.filterwarnings(action='ignore', message='Found more than one matching response.*') + rawDataIN.attach_response(inv) + else: + rawDataIN = obspy.read(str(input_data), starttime=UTCDateTime(params['starttime']), endttime=UTCDateTime(params['endtime']), nearest_sample=True) + rawDataIN.attach_response(inv) + elif source=='dir': #files with 3 traces, but may be several in a directory or only directory name provided + obspyFormats = ['AH','ALSEP_PSE','ALSEP_WTH','ALSEP_WTN','CSS','DMX','GCF','GSE1','GSE2','KINEMETRICS_EVT','MSEED','NNSA_KB_CORE','PDAS','PICKLE','Q','REFTEK130','RG16','SAC','SACXY','SEG2','SEGY','SEISAN','SH_ASC','SLIST','SU','TSPAIR','WAV','WIN','Y'] + for file in input_data.iterdir(): + ext = file.suffix[1:] + rawFormat = False + if ext.isnumeric(): + if float(ext) >= 0 and float(ext) < 367: + rawFormat=True + + if ext.upper() in obspyFormats or rawFormat: + filesinfolder = True + folderPathList.append(input_data) + fileList.append(file.name) + + filepaths = [] + rawDataIN = obspy.Stream() + for i, f in enumerate(fileList): + filepaths.append(folderPathList[i].joinpath(f)) + #filepaths[i] = pathlib.Path(filepaths[i]) + currData = obspy.read(filepaths[i]) + currData.merge() + #rawDataIN.append(currData) + #if i == 0: + # rawDataIN = currData.copy() + if isinstance(currData, obspy.core.stream.Stream): + rawDataIN += currData.copy() + #rawDataIN = obspy.Stream(rawDataIN) + rawDataIN.attach_response(inv) + if type(rawDataIN) is list and len(rawDataIN)==1: + rawDataIN = rawDataIN[0] + elif source=='file': + rawDataIN = obspy.read(str(input_data), starttime=UTCDateTime(params['starttime']), endttime=UTCDateTime(params['endtime']), nearest=True) + rawDataIN.merge() + rawDataIN.attach_response(inv) + elif type(source) is list or type(input_data) is list: + pass #Eventually do something + rawDataIN.attach_response(inv) + + return rawDataIN + + +# Helper functions for remove_noise() +# Helper function for removing gaps +def __remove_gaps(stream, window_gaps_obspy): + """Helper function for removing gaps""" + + # combine overlapping windows + overlapList = [] + for i in range(len(window_gaps_obspy)-2): + if window_gaps_obspy[i][1] > window_gaps_obspy[i+1][0]: + overlapList.append(i) + + for i, t in enumerate(overlapList): + if i < len(window_gaps_obspy)-2: + window_gaps_obspy[i][1] = window_gaps_obspy[i+1][1] + window_gaps_obspy.pop(i+1) + + # Add streams + window_gaps_s = [] + for w, win in enumerate(window_gaps_obspy): + if w == 0: + pass + elif w == len(window_gaps_obspy)-1: + pass + else: + window_gaps_s.append(win[1]-win[0]) + + if len(window_gaps_s) > 0: + stream_windows = [] + j = 0 + for i, window in 
enumerate(window_gaps_s): + j=i + newSt = stream.copy() + stream_windows.append(newSt.trim(starttime=window_gaps_obspy[i][1], endtime=window_gaps_obspy[i+1][0])) + i = j + 1 + newSt = stream.copy() + stream_windows.append(newSt.trim(starttime=window_gaps_obspy[i][1], endtime=window_gaps_obspy[i+1][0])) + + for i, st in enumerate(stream_windows): + if i == 0: + outStream = st.copy() + else: + newSt = st.copy() + gap = window_gaps_s[i-1] + outStream = outStream + newSt.trim(starttime=st[0].stats.starttime - gap, pad=True, fill_value=None) + outStream.merge() + else: + outStream = stream.copy() + + return outStream + + +# Helper function for getting windows to remove noise using stalta antitrigger method +def __remove_anti_stalta(stream, sta, lta, thresh, show_stalta_plot=False, verbose=False): + """Helper function for getting windows to remove noise using stalta antitrigger method + + Parameters + ---------- + stream : obspy.core.stream.Stream object + Input stream on which to perform noise removal + sta : int + Number of seconds to use as short term window, reads from remove_noise() function. + lta : int + Number of seconds to use as long term window, reads from remove_noise() function. + thresh : list + Two-item list or tuple with the thresholds for the stalta antitrigger. + Reads from remove_noise() function. The first value (index [0]) is the lower threshold (below which trigger is deactivated), + the second value (index [1] is the upper threshold (above which trigger is activated)), by default [8, 8] + show_plot : bool + If True, will plot the trigger and stalta values. Reads from remove_noise() function, by default False. + + Returns + ------- + outStream : obspy.core.stream.Stream object + Stream with a masked array for the data where 'noise' has been removed + + """ + from obspy.signal.trigger import classic_sta_lta + + if verbose: + print(f'\tRemoving noise using sta/lta antitrigger method: sta={sta}, lta={lta}, stalta_thresh={thresh}') + sampleRate = float(stream[0].stats.delta) + + sta_samples = sta / sampleRate #Convert to samples + lta_samples = lta / sampleRate #Convert to samples + staltaStream = stream.copy() + cFunList = [] + + for t, tr in enumerate(staltaStream): + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', category=UserWarning) + cFunList.append(classic_sta_lta(tr, nsta=sta_samples, nlta=lta_samples)) + + if show_stalta_plot is True: + obspy.signal.trigger.plot_trigger(tr, cFunList[0], thresh[1], thresh[0]) + elif type(show_stalta_plot) is int: + obspy.signal.trigger.plot_trigger(tr, cFunList[show_stalta_plot], thresh[1], thresh[0]) + + windows_samples = [] + for t, cf in enumerate(cFunList): + if len(obspy.signal.trigger.trigger_onset(cf, thresh[1], thresh[0])) > 0: + windows_samples.extend(obspy.signal.trigger.trigger_onset(cf, thresh[1], thresh[0]).tolist()) + def condense_window_samples(win_samples): + # Sort the list of lists based on the first element of each internal list + sorted_list = sorted(win_samples, key=lambda x: x[0]) + + # Initialize an empty result list + result = [] + if len(win_samples) == 0: + return result + # Initialize variables to track the current range + start, end = sorted_list[0] + + # Iterate over the sorted list + for i in range(1, len(sorted_list)): + current_start, current_end = sorted_list[i] + + # If the current range overlaps with the previous range + if current_start <= end: + # Update the end of the current range + end = max(end, current_end) + else: + # Add the previous range to the result and update the 
current range + result.append([start, end]) + start, end = current_start, current_end + + # Add the last range to the result + result.append([start, end]) + + return result + windows_samples = condense_window_samples(windows_samples) + + startT = stream[0].stats.starttime + endT = stream[0].stats.endtime + window_UTC = [] + window_MPL = [] + window_UTC.append([startT, startT]) + for w, win in enumerate(windows_samples): + for i, t in enumerate(win): + if i == 0: + window_UTC.append([]) + window_MPL.append([]) + trigShift = sta + if trigShift > t * sampleRate: + trigShift = 0 + tSec = t * sampleRate - trigShift + window_UTC[w+1].append(startT+tSec) + window_MPL[w].append(window_UTC[w][i].matplotlib_date) + + window_UTC.append([endT, endT]) + #window_MPL[w].append(window_UTC[w][i].matplotlib_date) + outStream = __remove_gaps(stream, window_UTC) + return outStream + + +# Remove noise saturation +def __remove_noise_saturate(stream, sat_percent, min_win_size, verbose=False): + """Function to remove "saturated" data points that exceed a certain percent (sat_percent) of the maximum data value in the stream. + + Parameters + ---------- + stream : obspy.Stream + Obspy Stream of interest + sat_percent : float + Percentage of the maximum amplitude, which will be used as the saturation threshold above which data points will be excluded + min_win_size : float + The minumum size a window must be (in seconds) for it to be removed + + Returns + ------- + obspy.Stream + Stream with masked array (if data removed) with "saturated" data removed + """ + if verbose: + print(f'\tRemoving noise using noise saturation method: sat_percent={sat_percent}, min_win_size={min_win_size}') + if sat_percent > 1: + sat_percent = sat_percent / 100 + + removeInd = np.array([], dtype=int) + for trace in stream: + dataArr = trace.data.copy() + + sample_rate = trace.stats.delta + + #Get max amplitude value + maxAmp = np.max(np.absolute(dataArr, where = not None)) + thresholdAmp = maxAmp * sat_percent + cond = np.nonzero(np.absolute(dataArr, where=not None) > thresholdAmp)[0] + removeInd = np.hstack([removeInd, cond]) + #trace.data = np.ma.where(np.absolute(data, where = not None) > (noise_percent * maxAmp), None, data) + #Combine indices from all three traces + removeInd = np.unique(removeInd) + + removeList = [] # initialize + min_win_samples = int(min_win_size / sample_rate) + + if len(removeInd) > 0: + startInd = removeInd[0] + endInd = removeInd[0] + + for i in range(0, len(removeInd)): + if removeInd[i] - removeInd[i-1] > 1: + if endInd - startInd >= min_win_samples: + removeList.append([int(startInd), int(endInd)]) + startInd = removeInd[i] + endInd = removeInd[i] + + removeList.append([-1, -1]) #figure out a way to get rid of this + + #Convert removeList from samples to seconds after start to UTCDateTime + sampleRate = stream[0].stats.delta + startT = stream[0].stats.starttime + endT = stream[0].stats.endtime + removeSec = [] + removeUTC = [] + removeUTC.append([startT, startT]) + for i, win in enumerate(removeList): + removeSec.append(list(np.round(sampleRate * np.array(win),6))) + removeUTC.append(list(np.add(startT, removeSec[i]))) + removeUTC[-1][0] = removeUTC[-1][1] = endT + + outstream = __remove_gaps(stream, removeUTC) + return outstream + + +# Helper function for removing data using the noise threshold input from remove_noise() +def __remove_noise_thresh(stream, noise_percent=0.8, lta=30, min_win_size=1, verbose=False): + """Helper function for removing data using the noise threshold input from remove_noise() + + 
The purpose of the noise threshold method is to remove noisy windows (e.g., lots of traffic all at once). + + This function uses the lta value (which can be specified here), and finds times where the lta value is at least at the noise_percent level of the max lta value for at least a specified time (min_win_size) + + Parameters + ---------- + stream : obspy.core.stream.Stream object + Input stream from which to remove windows. Passed from remove_noise(). + noise_percent : float, default=0.995 + Percentage (between 0 and 1), to use as the threshold at which to remove data. This is used in the noise threshold method. By default 0.995. + If a value is passed that is greater than 1, it will be divided by 100 to obtain the percentage. Passed from remove_noise(). + lta : int, default = 30 + Length of lta to use (in seconds) + min_win_size : int, default = 1 + Minimum amount of time (in seconds) at which noise is above noise_percent level. + + Returns + ------- + outStream : obspy.core.stream.Stream object + Stream with a masked array for the data where 'noise' has been removed. Passed to remove_noise(). + """ + if verbose: + print(f'\tRemoving noise using continuous noise threshold method: sat_percent={noise_percent}, lta={lta}') + if noise_percent > 1: + noise_percent = noise_percent / 100 + + removeInd = np.array([], dtype=int) + for trace in stream: + dataArr = trace.data.copy() + + sample_rate = trace.stats.delta + lta_samples = int(lta / sample_rate) + + #Get lta values across traces data + window_size = lta_samples + if window_size == 0: + window_size = 1 + kernel = np.ones(window_size) / window_size + maskedArr = np.ma.array(dataArr, dtype=float, fill_value=None) + ltaArr = np.convolve(maskedArr, kernel, mode='same') + #Get max lta value + maxLTA = np.max(ltaArr, where = not None) + cond = np.nonzero(np.absolute(ltaArr, where=not None) > (noise_percent * maxLTA))[0] + removeInd = np.hstack([removeInd, cond]) + #trace.data = np.ma.where(np.absolute(data, where = not None) > (noise_percent * maxAmp), None, data) + #Combine indices from all three traces + removeInd = np.unique(removeInd) + + # Make sure we're not removing single indices (we only want longer than min_win_size) + removeList = [] # initialize + min_win_samples = int(min_win_size / sample_rate) + + if len(removeInd) > 0: + startInd = removeInd[0] + endInd = removeInd[0] + + for i in range(0, len(removeInd)): + #If indices are non-consecutive... 
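+                # Illustration (hypothetical values): with removeInd = [3, 4, 5, 42, 43],
+                # the jump from 5 to 42 ends the first run of consecutive indices, and
+                # [3, 5] is kept as a removal window only if it spans >= min_win_samples.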
+ if removeInd[i] - removeInd[i-1] > 1: + #If the indices are non-consecutive and the + if endInd - startInd >= min_win_samples: + removeList.append([int(startInd), int(endInd)]) + + #Set startInd as the current index + startInd = removeInd[i] + endInd = removeInd[i] + + removeList.append([-1, -1]) + + sampleRate = stream[0].stats.delta + startT = stream[0].stats.starttime + endT = stream[0].stats.endtime + removeSec = [] + removeUTC = [] + + removeUTC.append([startT, startT]) + for i, win in enumerate(removeList): + removeSec.append(list(np.round(sampleRate * np.array(win),6))) + removeUTC.append(list(np.add(startT, removeSec[i]))) + removeUTC[-1][0] = removeUTC[-1][1] = endT + + outstream = __remove_gaps(stream, removeUTC) + + return outstream + + +# Helper function for removing data during warmup (when seismometers are still initializing) and "cooldown" (when there may be noise from deactivating seismometer) time, if desired +def __remove_warmup_cooldown(stream, warmup_time = 0, cooldown_time = 0, verbose=False): + """Private helper function to remove data from the start and/or end of each site + + Parameters + ---------- + stream : obspy.Stream() + Input stream to use for analysis for noise removal + warmup_time : int, optional + Time in seconds at the start of the record to remove from analysis, by default 0 + cooldown_time : int, optional + Time in seconds at the end of the record to remove from analysis, by default 0 + verbose : bool, optional + Whether to print information about the process to the terminal, by default False + + Returns + ------- + obspy.Stream() + obspy.Stream() with masked arrays for the data where removed/kept. + """ + if verbose: + print(f"\tRemoving noise using warmup/cooldown buffers: warmup_time={warmup_time} s, cooldown_time={cooldown_time} s ") + sampleRate = float(stream[0].stats.delta) + outStream = stream.copy() + + warmup_samples = int(warmup_time / sampleRate) #Convert to samples + windows_samples=[] + for tr in stream: + totalSamples = len(tr.data)-1#float(tr.stats.endtime - tr.stats.starttime) / tr.stats.delta + cooldown_samples = int(totalSamples - (cooldown_time / sampleRate)) #Convert to samples + + # Initiate list with warmup and cooldown samples + windows_samples = [[0, warmup_samples],[cooldown_samples, totalSamples]] + + # Remove cooldown and warmup samples if there is none indicated (default of 0 for both) + if cooldown_time == 0: + windows_samples.pop(1) + if warmup_time == 0: + windows_samples.pop(0) + + + if windows_samples == []: + # If no warmup or cooldown indicated, don't do anything + pass + else: + # Otherwise, get the actual starttime (UTCDateTime) + startT = stream[0].stats.starttime + endT = stream[-1].stats.endtime + window_UTC = [] + window_MPL = [] + + print("warmup starttime", startT) + # Initiate list with starttimes + for w, win in enumerate(windows_samples): + # win is a list with start/end time for each buffer, in samples + for j, tm in enumerate(win): + # For each side (warmup or cooldown), add a new item + # There will be 2 list items for warmup, 2 for cooldown (extra is for "padding") + if j == 0: + window_UTC.append([]) + window_MPL.append([]) + tSec = tm * sampleRate + + # Get the UTC time for the new item + window_UTC[w].append(startT+tSec) + window_MPL[w].append(window_UTC[w][j].matplotlib_date) + # "pad" list with endtime + window_UTC.insert(0, [startT, startT]) + window_UTC.append([endT, endT]) + + outStream = __remove_gaps(stream, window_UTC) + + return outStream + + +# Helper function for selecting windows +def 
_keep_processing_windows(stream, processing_window=[":"], verbose=False): + """Keep processing windows + + Parameters + ---------- + stream : obspy.Stream() + Stream + processing_window : list, optional + Processing window list, by default [":"] + verbose : bool, optional + Whether to print information about the removal to the terminal + + Returns + ------- + obspy.Stream() + Obspy stream object with selected windows retained and all else removed + """ + + if verbose: + print(f"\tRemoving noise outside the indicated processing window(s): processing_window={processing_window}") + instream = stream + allList = [':', 'all', 'everything'] + + print(stream[0].stats.starttime.year) + year = stream[0].stats.starttime.year + month = stream[0].stats.starttime.month + day = stream[0].stats.starttime.day + + if not isinstance(processing_window, (tuple, list)): + processing_window = [processing_window] + + windows_to_get = [] + for p in processing_window: + if str(p).lower() in allList: + return instream + + if isinstance(p, (tuple, list)): + windows_to_get.append([]) + if isinstance(p[0], (obspy.UTCDateTime, datetime.datetime)) and isinstance(p[1], (obspy.UTCDateTime, datetime.datetime)): + windows_to_get[-1].append(obspy.UTCDateTime(p[0])) + windows_to_get[-1].append(obspy.UTCDateTime(p[1])) + else: + windows_to_get[-1].append(obspy.UTCDateTime(sprit_utils.format_time(p[0], tzone='UTC'))) + windows_to_get[-1].append(obspy.UTCDateTime(sprit_utils.format_time(p[1], tzone='UTC'))) + + # Make sure time are on the right day + windows_to_get[-1][0] = obspy.UTCDateTime(year, month, day, windows_to_get[-1][0].hour, windows_to_get[-1][0].minute, windows_to_get[-1][0].second) + windows_to_get[-1][1] = obspy.UTCDateTime(year, month, day, windows_to_get[-1][1].hour, windows_to_get[-1][1].minute, windows_to_get[-1][1].second) + else: + if len(processing_window) == 2: + windows_to_get = [[obspy.UTCDateTime(sprit_utils.format_time(processing_window[0], tzone='UTC')), + obspy.UTCDateTime(sprit_utils.format_time(processing_window[1], tzone='UTC'))]] + else: + print(f'The processing_window parameter of remove_noise was set as {processing_window}') + print("The processing_window parameter must be a list or tuple with a start and end time or with lists/tuples of start/end times.") + print('processing_window noise removal method not applied') + return instream + + # windows_to_get should be a list of two-item lists with UTCDateTime objects no matter how it came in + stime = instream[0].stats.starttime + etime = instream[-1].stats.endtime + + windows_to_get.insert(0, [stime, stime]) + windows_to_get.append([etime, etime]) + + # Need the list formatted slightly different, use window_UTC + window_UTC = [] + # Rearrange + for i, win in enumerate(windows_to_get): + if i == 0: + window_UTC.append([stime, windows_to_get[i+1][0]]) + elif i < len(windows_to_get) - 1: + window_UTC.append([win[1], windows_to_get[i+1][0]]) + + window_UTC.insert(0, windows_to_get[0]) + window_UTC.append(windows_to_get[-1]) + + outStream = __remove_gaps(stream, window_UTC) + + return outStream + +# Plot noise windows +def _plot_noise_windows(hvsr_data, fig=None, ax=None, clear_fig=False, fill_gaps=None, + do_stalta=False, sta=5, lta=30, stalta_thresh=[0.5,5], + do_pctThresh=False, sat_percent=0.8, min_win_size=1, + do_noiseWin=False, noise_percent=0.995, + do_warmup=False, warmup_time=0, cooldown_time=0, + return_dict=False, use_tkinter=False): + + if clear_fig: #Intended use for tkinter + #Clear everything + for key in ax: + ax[key].clear() + 
fig.clear() + + #Really make sure it's out of memory + fig = [] + ax = [] + try: + fig.get_children() + except: + pass + try: + ax.get_children() + except: + pass + + if use_tkinter: + try: + pass #Don't think this is being used anymore, defined in sprit_gui separately + #ax=ax_noise #self.ax_noise #? + #fig=fig_noise + except: + pass + + #Reset axes, figure, and canvas widget + noise_mosaic = [['spec'],['spec'],['spec'], + ['spec'],['spec'],['spec'], + ['signalz'],['signalz'], ['signaln'], ['signale']] + fig, ax = plt.subplot_mosaic(noise_mosaic, sharex=True) + #self.noise_canvas = FigureCanvasTkAgg(fig, master=canvasFrame_noise) + #self.noise_canvasWidget.destroy() + #self.noise_canvasWidget = self.noise_canvas.get_tk_widget()#.pack(side=tk.TOP, fill=tk.BOTH, expand=1) + #self.noise_canvasWidget.pack(fill='both')#.grid(row=0, column=0, sticky='nsew') + fig.canvas.draw() + + fig, ax = _plot_specgram_stream(stream=hvsr_data['stream'], params=hvsr_data, fig=fig, ax=ax, component='Z', stack_type='linear', detrend='mean', fill_gaps=fill_gaps, dbscale=True, return_fig=True, cmap_per=[0.1,0.9]) + fig.canvas.draw() + + #Set initial input + input = hvsr_data['stream'] + + if do_stalta: + hvsr_data['stream'] = remove_noise(hvsr_data=input, remove_method='stalta', sta=sta, lta=lta, stalta_thresh=stalta_thresh) + input = hvsr_data['stream'] + + if do_pctThresh: + hvsr_data['stream'] = remove_noise(hvsr_data=input, remove_method='saturation', sat_percent=sat_percent, min_win_size=min_win_size) + input = hvsr_data['stream'] + + if do_noiseWin: + hvsr_data['stream'] = remove_noise(hvsr_data=input, remove_method='noise', noise_percent=noise_percent, lta=lta, min_win_size=min_win_size) + input = hvsr_data['stream'] + + if do_warmup: + hvsr_data['stream'] = remove_noise(hvsr_data=input, remove_method='warmup', warmup_time=warmup_time, cooldown_time=cooldown_time) + + fig, ax, noise_windows_line_artists, noise_windows_window_artists = _get_removed_windows(input=hvsr_data, fig=fig, ax=ax, time_type='matplotlib') + + fig.canvas.draw() + plt.show() + if return_dict: + hvsr_data['Windows_Plot'] = (fig, ax) + return hvsr_data + return + + +# Helper function for manual window selection +def __draw_boxes(event, clickNo, xWindows, pathList, windowDrawn, winArtist, lineArtist, x0, fig, ax): + """Helper function for manual window selection to draw boxes to show where windows have been selected for removal""" + #Create an axis dictionary if it does not already exist so all functions are the same + + if isinstance(ax, np.ndarray) or isinstance(ax, dict): + ax = ax + else: + ax = {'a':ax} + + + if len(ax) > 1: + if type(ax) is not dict: + axDict = {} + for i, a in enumerate(ax): + axDict[str(i)] = a + ax = axDict + #else: + # ax = {'a':ax} + + #if event.inaxes!=ax: return + #y0, y1 = ax.get_ylim() + y0 = [] + y1 = [] + kList = [] + for k in ax.keys(): + kList.append(k) + y0.append(ax[k].get_ylim()[0]) + y1.append(ax[k].get_ylim()[1]) + #else: + # y0 = [ax.get_ylim()[0]] + # y1 = [ax.get_ylim()[1]] + + if clickNo == 0: + #y = np.linspace(ax.get_ylim()[0], ax.get_ylim()[1], 2) + x0 = event.xdata + clickNo = 1 + lineArtist.append([]) + winNums = len(xWindows) + for i, k in enumerate(ax.keys()): + linArt = ax[k].axvline(x0, 0, 1, color='k', linewidth=1, zorder=100) + lineArtist[winNums].append([linArt, linArt]) + #else: + # linArt = plt.axvline(x0, y0[i], y1[i], color='k', linewidth=1, zorder=100) + # lineArtist.append([linArt, linArt]) + else: + x1 = event.xdata + clickNo = 0 + + windowDrawn.append([]) + 
winArtist.append([]) + pathList.append([]) + winNums = len(xWindows) + for i, key in enumerate(kList): + path_data = [ + (matplotlib.path.Path.MOVETO, (x0, y0[i])), + (matplotlib.path.Path.LINETO, (x1, y0[i])), + (matplotlib.path.Path.LINETO, (x1, y1[i])), + (matplotlib.path.Path.LINETO, (x0, y1[i])), + (matplotlib.path.Path.LINETO, (x0, y0[i])), + (matplotlib.path.Path.CLOSEPOLY, (x0, y0[i])), + ] + codes, verts = zip(*path_data) + path = matplotlib.path.Path(verts, codes) + + windowDrawn[winNums].append(False) + winArtist[winNums].append(None) + + pathList[winNums].append(path) + __draw_windows(event=event, pathlist=pathList, ax_key=key, windowDrawn=windowDrawn, winArtist=winArtist, xWindows=xWindows, fig=fig, ax=ax) + linArt = plt.axvline(x1, 0, 1, color='k', linewidth=0.5, zorder=100) + + [lineArtist[winNums][i].pop(-1)] + lineArtist[winNums][i].append(linArt) + x_win = [x0, x1] + x_win.sort() #Make sure they are in the right order + xWindows.append(x_win) + fig.canvas.draw() + return clickNo, x0 + + +# Helper function for manual window selection to draw boxes to deslect windows for removal +def __remove_on_right(event, xWindows, pathList, windowDrawn, winArtist, lineArtist, fig, ax): + """Helper function for manual window selection to draw boxes to deslect windows for removal""" + + if xWindows is not None: + for i, xWins in enumerate(xWindows): + if event.xdata > xWins[0] and event.xdata < xWins[1]: + linArtists = lineArtist[i] + pathList.pop(i) + for j, a in enumerate(linArtists): + winArtist[i][j].remove()#.pop(i) + lineArtist[i][j][0].remove()#.pop(i)#[i].pop(j) + lineArtist[i][j][1].remove() + windowDrawn.pop(i) + lineArtist.pop(i)#[i].pop(j) + winArtist.pop(i)#[i].pop(j) + xWindows.pop(i) + fig.canvas.draw() + + +# Helper function for updating the canvas and drawing/deleted the boxes +def __draw_windows(event, pathlist, ax_key, windowDrawn, winArtist, xWindows, fig, ax): + """Helper function for updating the canvas and drawing/deleted the boxes""" + for i, pa in enumerate(pathlist): + for j, p in enumerate(pa): + if windowDrawn[i][j]: + pass + else: + patch = matplotlib.patches.PathPatch(p, facecolor='k', alpha=0.75) + winArt = ax[ax_key].add_patch(patch) + windowDrawn[i][j] = True + winArtist[i][j] = winArt + + if event.button is MouseButton.RIGHT: + fig.canvas.draw() + + +# Helper function for getting click event information +def __on_click(event): + """Helper function for getting click event information""" + global clickNo + global x0 + if event.button is MouseButton.RIGHT: + __remove_on_right(event, xWindows, pathList, windowDrawn, winArtist, lineArtist, fig, ax) + + if event.button is MouseButton.LEFT: + clickNo, x0 = __draw_boxes(event, clickNo, xWindows, pathList, windowDrawn, winArtist, lineArtist, x0, fig, ax) + + +# Function to select windows using original stream specgram/plots +def _select_windows(input): + """Function to manually select windows for exclusion from data. + + Parameters + ---------- + input : dict + Dictionary containing all the hvsr information. + + Returns + ------- + xWindows : list + List of two-item lists containing start and end times of windows to be removed. 
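+
+    Notes
+    -----
+    In practice the updated input dict is returned, with the selected windows
+    stored under 'x_windows_out' and the figure/axes under 'fig_noise' and
+    'ax_noise'. Illustrative use (requires an interactive matplotlib backend):
+
+        >>> hvsr_data = _select_windows(hvsr_data)
+        >>> hvsr_data['x_windows_out']  # list of [start, end] x-values selected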
+ """ + from matplotlib.backend_bases import MouseButton + import matplotlib.pyplot as plt + import matplotlib + import time + global fig + global ax + + if isinstance(input, (HVSRData, dict)): + if 'hvsr_curve' in input.keys(): + fig = plot_hvsr(hvsr_data=input, plot_type='spec', returnfig=True, cmap='turbo') + else: + hvsr_data = input#.copy() + input_stream = hvsr_data['stream'] + + if isinstance(input_stream, obspy.core.stream.Stream): + fig, ax = _plot_specgram_stream(input_stream, component=['Z']) + elif isinstance(input_stream, obspy.core.trace.Trace): + fig, ax = _plot_specgram_stream(input_stream) + + global lineArtist + global winArtist + global windowDrawn + global pathList + global xWindows + global clickNo + global x0 + x0=0 + clickNo = 0 + xWindows = [] + pathList = [] + windowDrawn = [] + winArtist = [] + lineArtist = [] + + global fig_closed + fig_closed = False + while fig_closed is False: + fig.canvas.mpl_connect('button_press_event', __on_click)#(clickNo, xWindows, pathList, windowDrawn, winArtist, lineArtist, x0, fig, ax)) + fig.canvas.mpl_connect('close_event', _on_fig_close)#(clickNo, xWindows, pathList, windowDrawn, winArtist, lineArtist, x0, fig, ax)) + plt.pause(1) + + hvsr_data['x_windows_out'] = xWindows + hvsr_data['fig_noise'] = fig + hvsr_data['ax_noise'] = ax + return hvsr_data + + +# Support function to help select_windows run properly +def _on_fig_close(event): + global fig_closed + fig_closed = True + return + + +# Shows windows with None on input plot +def _get_removed_windows(input, fig=None, ax=None, lineArtist =[], winArtist = [], existing_lineArtists=[], existing_xWindows=[], exist_win_format='matplotlib', keep_line_artists=True, time_type='matplotlib',show_plot=False): + """This function is for getting Nones from masked arrays and plotting them as windows""" + if fig is None and ax is None: + fig, ax = plt.subplots() + + if isinstance(input, (dict, HVSRData)): + stream = input['stream'].copy() + elif isinstance(input, (obspy.core.trace.Trace, obspy.core.stream.Stream)): + stream = input.copy() + else: + pass #Warning? 
+ + samplesList = ['sample', 'samples', 's'] + utcList = ['utc', 'utcdatetime', 'obspy', 'u', 'o'] + matplotlibList = ['matplotlib', 'mpl', 'm'] + + #Get masked indices of trace(s) + trace = stream.merge()[0] + sample_rate = trace.stats.delta + windows = [] + #windows.append([0,np.nan]) + #mask = np.isnan(trace.data) # Create a mask for None values + #masked_array = np.ma.array(trace.data, mask=mask).copy() + masked_array = trace.data.copy() + if isinstance(masked_array, np.ma.MaskedArray): + masked_array = masked_array.mask.nonzero()[0] + lastMaskInd = masked_array[0]-1 + wInd = 0 + for i in range(0, len(masked_array)-1): + maskInd = masked_array[i] + if maskInd-lastMaskInd > 1 or i==0: + windows.append([np.nan, np.nan]) + if i==0: + windows[wInd][0] = masked_array[i] + else: + windows[wInd-1][1] = masked_array[i - 1] + windows[wInd][0] = masked_array[i] + wInd += 1 + lastMaskInd = maskInd + windows[wInd-1][1] = masked_array[-1] #Fill in last masked value (wInd-1 b/c wInd+=1 earlier) + winTypeList = ['gaps'] * len(windows) + + #Check if the windows are just gaps + if len(existing_xWindows) > 0: + existWin = [] + #Check if windows are already being taken care of with the gaps + startList = [] + endList = [] + for start, end in windows: + startList.append((trace.stats.starttime + start*sample_rate).matplotlib_date) + endList.append((trace.stats.starttime + end*sample_rate).matplotlib_date) + for w in existing_xWindows: + removed=False + if w[0] in startList and w[1] in endList: + existing_xWindows.remove(w) + + removed=True + if exist_win_format.lower() in matplotlibList and not removed: + sTimeMPL = trace.stats.starttime.matplotlib_date #Convert time to samples from starttime + existWin.append(list(np.round((w - sTimeMPL)*3600*24/sample_rate))) + + windows = windows + existWin + existWinTypeList = ['removed'] * len(existWin) + winTypeList = winTypeList + existWinTypeList + + #Reformat ax as needed + if isinstance(ax, np.ndarray): + origAxes = ax.copy() + newAx = {} + for i, a in enumerate(ax): + newAx[i] = a + axes = newAx + elif isinstance(ax, dict): + origAxes = ax + axes = ax + else: + origAxes = ax + axes = {'ax':ax} + + for i, a in enumerate(axes.keys()): + ax = axes[a] + pathList = [] + + windowDrawn = [] + winArtist = [] + if existing_lineArtists == []: + lineArtist = [] + elif len(existing_lineArtists)>=1 and keep_line_artists: + lineArtist = existing_lineArtists + else: + lineArtist = [] + + for winNums, win in enumerate(windows): + if time_type.lower() in samplesList: + x0 = win[0] + x1 = win[1] + elif time_type.lower() in utcList or time_type.lower() in matplotlibList: + #sample_rate = trace.stats.delta + + x0 = trace.stats.starttime + (win[0] * sample_rate) + x1 = trace.stats.starttime + (win[1] * sample_rate) + + if time_type.lower() in matplotlibList: + x0 = x0.matplotlib_date + x1 = x1.matplotlib_date + else: + warnings.warn(f'time_type={time_type} not recognized. 
Defaulting to matplotlib time formatting') + x0 = trace.stats.starttime + (win[0] * sample_rate) + x1 = trace.stats.starttime + (win[1] * sample_rate) + + x0 = x0.matplotlib_date + x1 = x1.matplotlib_date + + y0, y1 = ax.get_ylim() + + path_data = [ + (matplotlib.path.Path.MOVETO, (x0, y0)), + (matplotlib.path.Path.LINETO, (x1, y0)), + (matplotlib.path.Path.LINETO, (x1, y1)), + (matplotlib.path.Path.LINETO, (x0, y1)), + (matplotlib.path.Path.LINETO, (x0, y0)), + (matplotlib.path.Path.CLOSEPOLY, (x0, y0)), + ] + + codes, verts = zip(*path_data) + path = matplotlib.path.Path(verts, codes) + + # + windowDrawn.append(False) + winArtist.append(None) + lineArtist.append([]) + + if winTypeList[winNums] == 'gaps': + clr = '#b13d41' + elif winTypeList[winNums] == 'removed': + clr = 'k' + else: + clr = 'yellow' + + linArt0 = ax.axvline(x0, y0, y1, color=clr, linewidth=0.5, zorder=100) + linArt1 = plt.axvline(x1, y0, y1, color=clr, linewidth=0.5, zorder=100) + lineArtist[winNums].append([linArt0, linArt1]) + # + + pathList.append(path) + + for i, pa in enumerate(pathList): + if windowDrawn[i]: + pass + else: + patch = matplotlib.patches.PathPatch(pa, facecolor=clr, alpha=0.75) + winArt = ax.add_patch(patch) + windowDrawn[i] = True + winArtist[i] = winArt + + #Reformat ax as needed + if isinstance(origAxes, np.ndarray): + origAxes[i] = ax + elif isinstance(origAxes, dict): + origAxes[a] = ax + else: + origAxes = ax + + ax = origAxes + + fig.canvas.draw() + + if show_plot: + plt.show() + return fig, ax, lineArtist, winArtist + + +# Helper function for removing windows from data, leaving gaps +def __remove_windows(stream, window_list, warmup_time): + """Helper function that actually does the work in obspy to remove the windows calculated in the remove_noise function +s + Parameters + ---------- + stream : obspy.core.stream.Stream object + Input stream from which to remove windows + window_list : list + A list of windows with start and end times for the windows to be removed + warmup_time : int, default = 0 + Passed from remove_noise, the amount of time in seconds to allow for warmup. Anything before this is removed as 'noise'. + + Returns + ------- + outStream : obspy.core.stream.Stream object + Stream with a masked array for the data where 'noise' has been removed + """ + og_stream = stream.copy() + + #Find the latest start time and earliest endtime of all traces (in case they aren't consistent) + maxStartTime = obspy.UTCDateTime(-1e10) #Go back pretty far (almost 400 years) to start with + minEndTime = obspy.UTCDateTime(1e10) + for comp in ['E', 'N', 'Z']: + tr = stream.select(component=comp).copy() + if tr[0].stats.starttime > maxStartTime: + maxStartTime = tr[0].stats.starttime + if tr[0].stats.endtime < minEndTime: + minEndTime = tr[0].stats.endtime + + #Trim all traces to the same start/end time + stream.trim(starttime=maxStartTime, endtime=minEndTime) + + #Sort windows by the start of the window + sorted_window_list = [] + windowStart = [] + for i, window in enumerate(window_list): + windowStart.append(window[0]) + windowStart_og = windowStart.copy() + windowStart.sort() + sorted_start_list = windowStart + ranks = [windowStart_og.index(item) for item in sorted_start_list] + for r in ranks: + sorted_window_list.append(window_list[r]) + + for i, w in enumerate(sorted_window_list): + if i < len(sorted_window_list) - 1: + if w[1] > sorted_window_list[i+1][0]: + warnings.warn(f"Warning: Overlapping windows. 
Please start over and reselect windows to be removed or use a different noise removal method: {w[1]} '>' {sorted_window_list[i+1][0]}") + return + + window_gaps_obspy = [] + window_gaps = [] + + buffer_time = np.ceil((stream[0].stats.endtime-stream[0].stats.starttime)*0.01) + + #Get obspy.UTCDateTime objects for the gap times + window_gaps_obspy.append([stream[0].stats.starttime + warmup_time, stream[0].stats.starttime + warmup_time]) + for i, window in enumerate(sorted_window_list): + for j, item in enumerate(window): + if j == 0: + window_gaps_obspy.append([0,0]) + window_gaps_obspy[i+1][j] = obspy.UTCDateTime(matplotlib.dates.num2date(item)) + window_gaps.append((window[1]-window[0])*86400) + window_gaps_obspy.append([stream[0].stats.endtime-buffer_time, stream[0].stats.endtime-buffer_time]) + #Note, we added start and endtimes to obpsy list to help with later functionality + + #Clean up stream windows (especially, start and end) + for i, window in enumerate(window_gaps): + newSt = stream.copy() + #Check if first window starts before end of warmup time + #If the start of the first exclusion window is before the warmup_time is over + if window_gaps_obspy[i+1][0] - newSt[0].stats.starttime < warmup_time: + #If the end of first exclusion window is also before the warmup_time is over + if window_gaps_obspy[i+1][1] - newSt[0].stats.starttime < warmup_time: + #Remove that window completely, it is unnecessary + window_gaps.pop(i) + window_gaps_obspy.pop(i+1) + #...and reset the entire window to start at the warmup_time end + window_gaps_obspy[0][0] = window_gaps_obspy[0][1] = newSt[0].stats.starttime + warmup_time + continue + else: #if window overlaps the start of the stream after warmup_time + #Remove that window + window_gaps.pop(i) + #...and reset the start of the window to be the end of warm up time + #...and remove that first window from the obspy list + window_gaps_obspy[0][0] = window_gaps_obspy[0][1] = window_gaps_obspy[i+1][1]#newSt[0].stats.starttime + warmup_time + window_gaps_obspy.pop(i+1) + + + if stream[0].stats.endtime - window_gaps_obspy[i+1][1] > stream[0].stats.endtime - buffer_time: + if stream[0].stats.endtime - window_gaps_obspy[i+1][0] > stream[0].stats.endtime - buffer_time: + window_gaps.pop(i) + window_gaps_obspy.pop(i+1) + else: #if end of window overlaps the buffer time, just end it at the start of the window (always end with stream, not gap) + window_gaps.pop(i) + window_gaps_obspy[-1][0] = window_gaps_obspy[-1][1] = newSt[0].stats.endtime - buffer_time + + #Add streams + stream_windows = [] + j = 0 + for i, window in enumerate(window_gaps): + j=i + newSt = stream.copy() + stream_windows.append(newSt.trim(starttime=window_gaps_obspy[i][1], endtime=window_gaps_obspy[i+1][0])) + i = j + 1 + newSt = stream.copy() + stream_windows.append(newSt.trim(starttime=window_gaps_obspy[i][1], endtime=window_gaps_obspy[i+1][0])) + + for i, st in enumerate(stream_windows): + if i == 0: + outStream = st.copy() + else: + newSt = st.copy() + gap = window_gaps[i-1] + outStream = outStream + newSt.trim(starttime=st[0].stats.starttime - gap, pad=True, fill_value=None) + outStream.merge() + return outStream + + +# Remove noisy windows from df +def __remove_windows_from_df(hvsr_data, verbose=False): + # Get gaps from masked regions of traces + gaps0 = [] + gaps1 = [] + outStream = hvsr_data['stream_edited'].split() + for i, trace in enumerate(outStream): + if i == 0: + trEndTime = trace.stats.endtime + comp_end = trace.stats.component + continue # Wait until the second trace + + 
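+        # Each boundary between consecutive traces of the split stream is a
+        # masked-out gap: the previous trace's endtime marks the gap start and
+        # the current trace's starttime marks the gap end.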
trStartTime = trace.stats.starttime + comp_start = trace.stats.component + firstDiff = True + secondDiff = True + + # Check if both are different from any existing gap times + if trEndTime in gaps0: + firstDiff = False + if trStartTime in gaps1: + secondDiff = False + + # If the first element and second element are both new, add to gap list + if firstDiff and secondDiff: + gaps0.append(trEndTime) + gaps1.append(trStartTime) + + trEndTime = trace.stats.endtime + + gaps = list(zip(gaps0, gaps1)) + hvsr_windows_df_exists = ('hvsr_windows_df' in hvsr_data.keys()) or ('params' in hvsr_data.keys() and 'hvsr_windows_df' in hvsr_data['params'].keys()) or ('input_params' in hvsr_data.keys() and 'hvsr_windows_df' in hvsr_data['input_params'].keys()) + if hvsr_windows_df_exists: + hvsrDF = hvsr_data['hvsr_windows_df'] + use_before = hvsrDF["Use"].copy().astype(bool) + outStream = hvsr_data['stream_edited'].split() + #for i, trace in enumerate(outStream): + #if i == 0: + # trEndTime = trace.stats.endtime + # comp_end = trace.stats.component + # continue + #trStartTime = trace.stats.starttime + #comp_start = trace.stats.component + + #if trEndTime < trStartTime and comp_end == comp_start: + hvsrDF['Use'] = hvsrDF['Use'].astype(bool) + for gap in gaps: + # All windows whose starts occur within the gap are set to False + gappedIndices = hvsrDF.between_time(gap[0].datetime.time(), gap[1].datetime.time()).index#.loc[:, 'Use'] + hvsrDF.loc[gappedIndices,'Use'] = False + + # The previous window is also set to false, since the start of the gap lies within that window + prevInd = hvsrDF.index.get_indexer([gap[0]], method='ffill') + prevDTInd = hvsrDF.index[prevInd] + hvsrDF.loc[prevDTInd, 'Use'] = False + + hvsrDF['Use'] = hvsrDF['Use'].astype(bool) + + hvsr_data['hvsr_windows_df'] = hvsrDF # May not be needed, just in case, though + + use_after = hvsrDF["Use"].astype(bool) + removed = ~use_before.eq(use_after) + + if verbose: + if removed[removed].shape[0]>0: + print(f"\n\t\tThe windows starting at the following times have been removed from further analysis ({removed[removed].shape[0]}/{hvsrDF.shape[0]})") + for t in removed[removed].index.to_pydatetime(): + print(f'\t\t {t} ') + else: + print(f"\t\tNo windows removed using remove_noise()") + + outStream.merge() + hvsr_data['stream_edited'] = outStream + else: + if verbose: + print("\n\t\t\tThe dataframe at hvsr_data['hvsr_windows_df'] has not been created yet (this is created by generate_ppsds())") + print('\t\t\tNoisy windows have been set aside for removal, ', end='') + print('but will not be removed from analysis until after hvsr_windows_df has been created') + hvsr_data['x_gaps_obspyDT'] = gaps + + return hvsr_data + + +# Helper functions for process_hvsr() +# Get diffuse field assumption data +def _dfa(x, hvsr_data=None, verbose=False):#, equal_interval_energy, median_daily_psd, verbose=False): + """Helper function for performing Diffuse Field Assumption (DFA) analysis + + x : numpy.array + Numpy array or list containing all x values (frequency or period) for each psd + hvsr_data : HVSRData object + HVSRData object containing all the data and information about the HVSR point being processed + verbose : bool, optional + Whether to print information about the DFA processing to terminal, default = False. 
+ + """ + # Use equal energy for daily PSDs to give small 'events' a chance to contribute + # the same as large ones, so that P1+P2+P3=1 + hvsr_tSteps = [] + + if verbose: + print('\tUsing Diffuse Field Assumption (DFA)', flush=True) + warnings.warn('WARNING: DFA method is currently experimental and has not been extensively tested.') + + hvsr_data['dfa'] = {} + sum_ns_power = list() + sum_ew_power = list() + sum_z_power = list() + hvsr_data['dfa']['time_int_psd'] = {'Z':{}, 'E':{}, 'N':{}} + hvsr_data['dfa']['time_values'] = list() + hvsr_data['dfa']['equal_interval_energy'] = {'Z':{}, 'E':{}, 'N':{}} + + ti = 0 + for i, t_int in enumerate(hvsr_data['ppsds']['Z']['current_times_used']): + ti+=1 + hvsr_curve_tinterval = [] + + # Initialize some lists for later use + sum_ns_power = list() + sum_ew_power = list() + sum_z_power = list() + + # Add the time interval to the time_values list + time_int = str(t_int)#day_time.split('T')[0] + if time_int not in hvsr_data['dfa']['time_values']: + hvsr_data['dfa']['time_values'].append(time_int) + + # Get the psd data for each time + tiIndDF = hvsr_data['hvsr_windows_df'].index[i] + hvsr_data['dfa']['time_int_psd']['Z'][time_int] = hvsr_data['hvsr_windows_df'].loc[tiIndDF,'psd_values_Z'] + hvsr_data['dfa']['time_int_psd']['E'][time_int] = hvsr_data['hvsr_windows_df'].loc[tiIndDF,'psd_values_E'] + hvsr_data['dfa']['time_int_psd']['N'][time_int] = hvsr_data['hvsr_windows_df'].loc[tiIndDF,'psd_values_N'] + + # Each PSD for the time_int (there is only one in SpRIT) + Pz = list() + P1 = list() + P2 = list() + sum_pz = 0 + sum_p1 = 0 + sum_p2 = 0 + + # Each sample of the PSD, convert to power + for j in range(len(x) - 1): + pz = __get_power([hvsr_data['dfa']['time_int_psd']['Z'][time_int][j][()], hvsr_data['dfa']['time_int_psd']['Z'][time_int][j + 1][()]], [x[j], x[j + 1]]) + Pz.append(pz) + sum_pz += pz + + p1 = __get_power([hvsr_data['dfa']['time_int_psd']['E'][time_int][j][()], hvsr_data['dfa']['time_int_psd']['E'][time_int][j + 1][()]], [x[j], x[j + 1]]) + P1.append(p1) + sum_p1 += p1 + + p2 = __get_power([hvsr_data['dfa']['time_int_psd']['N'][time_int][j][()], hvsr_data['dfa']['time_int_psd']['N'][time_int][j + 1][()]], [x[j], x[j + 1]]) + P2.append(p2) + sum_p2 += p2 + + sum_power = sum_pz + sum_p1 + sum_p2 # total power + + # Normalized power + for j in range(len(x) - 1): + sum_z_power.append(Pz[j] / sum_power) + sum_ew_power.append(P1[j] / sum_power) + sum_ns_power.append(P2[j] / sum_power) + + # Average the normalized time interval power + for j in range(len(x) - 1): + sum_z_power[j] /= len(hvsr_data['dfa']['time_int_psd']['Z'][time_int]) + sum_ew_power[j] /= len(hvsr_data['dfa']['time_int_psd']['E'][time_int]) + sum_ns_power[j] /= len(hvsr_data['dfa']['time_int_psd']['N'][time_int]) + + hvsr_data['dfa']['equal_interval_energy']['Z'][time_int] = sum_z_power + hvsr_data['dfa']['equal_interval_energy']['E'][time_int] = sum_ew_power + hvsr_data['dfa']['equal_interval_energy']['N'][time_int] = sum_ns_power + + + # Start second DFA section in original IRIS script + # Perform h/v calculation at each frequency/time step + eie = hvsr_data['dfa']['equal_interval_energy'] + for j in range(len(x) - 1): + if (time_int in list(eie['Z'].keys())) and (time_int in list(eie['E'].keys())) and (time_int in list(eie['N'].keys())): + hv_x = math.sqrt((eie['E'][time_int][j] + eie['N'][time_int][j]) / eie['Z'][time_int][j]) + hvsr_curve_tinterval.append(hv_x) + else: + if verbose > 0: + print('WARNING: ' + str(t_int) + ' missing component, skipped!') + continue + + #Average
over time + hvsr_tSteps.append(hvsr_curve_tinterval) + + return hvsr_tSteps + + +# Helper function for smoothing across frequencies +def __freq_smooth_window(hvsr_out, f_smooth_width, kind_freq_smooth): + """Helper function to smooth frequency if 'constant' or 'proportional' is passed to freq_smooth parameter of process_hvsr() function""" + if kind_freq_smooth == 'constant': + fwidthHalf = f_smooth_width//2 + elif kind_freq_smooth == 'proportional': + anyKey = list(hvsr_out['psd_raw'].keys())[0] + freqLength = hvsr_out['psd_raw'][anyKey].shape[1] + if f_smooth_width > 1: + fwidthHalf = int(f_smooth_width/100 * freqLength) + else: + fwidthHalf = int(f_smooth_width * freqLength) + else: + warnings.warn('Oops, typo somewhere') + + + for k in hvsr_out['psd_raw']: + colName = f'psd_values_{k}' + + newTPSD = list(np.stack(hvsr_out['hvsr_windows_df'][colName])) + #newTPSD = list(np.ones_like(hvsr_out['psd_raw'][k])) + + for t, tPSD in enumerate(hvsr_out['psd_raw'][k]): + for i, fVal in enumerate(tPSD): + if i < fwidthHalf: + downWin = i + ind = -1*(fwidthHalf-downWin) + windMultiplier_down = np.linspace(1/fwidthHalf, 1-1/fwidthHalf, fwidthHalf) + windMultiplier_down = windMultiplier_down[:ind] + else: + downWin = fwidthHalf + windMultiplier_down = np.linspace(1/fwidthHalf, 1-1/fwidthHalf, fwidthHalf) + if i + fwidthHalf >= len(tPSD): + upWin = (len(tPSD) - i) + ind = -1 * (fwidthHalf-upWin+1) + windMultiplier_up = np.linspace(1-1/fwidthHalf, 0, fwidthHalf) + windMultiplier_up = windMultiplier_up[:ind] + + else: + upWin = fwidthHalf+1 + windMultiplier_up = np.linspace(1 - 1/fwidthHalf, 0, fwidthHalf) + + windMultiplier = list(np.hstack([windMultiplier_down, windMultiplier_up])) + midInd = np.argmax(windMultiplier) + if i > 0: + midInd+=1 + windMultiplier.insert(midInd, 1) + smoothVal = np.divide(np.sum(np.multiply(tPSD[i-downWin:i+upWin], windMultiplier)), np.sum(windMultiplier)) + newTPSD[t][i] = smoothVal + + hvsr_out['psd_raw'][k] = newTPSD + hvsr_out['hvsr_windows_df'][colName] = pd.Series(list(newTPSD), index=hvsr_out['hvsr_windows_df'].index) + + + return hvsr_out + + +# Get an HVSR curve, given an array of x values (freqs), and a dict with psds for three components +def __get_hvsr_curve(x, psd, horizontal_method, hvsr_data, azimuth=None, verbose=False): + """ Get an HVSR curve from three components over the same time period/frequency intervals + + Parameters + ---------- + x : list or array_like + x value (frequency or period) + psd : dict + Dictionary with psd values for three components. Usually read in as part of hvsr_data from process_hvsr + horizontal_method : int or str + Integer or string, read in from process_hvsr method parameter + + Returns + ------- + tuple + (hvsr_curve, hvsr_tSteps), both np.arrays. hvsr_curve is a numpy array containing H/V ratios at each frequency/period in x. + hvsr_tSteps only used with diffuse field assumption method. 
+ + """ + hvsr_curve = [] + hvsr_tSteps = [] + hvsr_azimuth = {} + + params = hvsr_data + if horizontal_method==1 or horizontal_method =='dfa' or horizontal_method =='Diffuse Field Assumption': + hvsr_tSteps = _dfa(x, hvsr_data, verbose) + hvsr_curve = np.mean(hvsr_tSteps, axis=0) + else: + for j in range(len(x)-1): + psd0 = [psd['Z'][j], psd['Z'][j + 1]] + psd1 = [psd['E'][j], psd['E'][j + 1]] + psd2 = [psd['N'][j], psd['N'][j + 1]] + f = [x[j], x[j + 1]] + + hvratio = __get_hvsr(psd0, psd1, psd2, f, azimuth=azimuth, use_method=horizontal_method) + hvsr_curve.append(hvratio) + + # Do azimuth HVSR calculations, if applicable + hvratio_az = 0 + for k in psd.keys(): + if k.lower() not in ['z', 'e', 'n']: + psd_az = [psd[k][j], psd[k][j + 1]] + hvratio_az = __get_hvsr(psd0, psd_az, None, f, azimuth=azimuth, use_method='az') + if j == 0: + hvsr_azimuth[k] = [hvratio_az] + else: + hvsr_azimuth[k].append(hvratio_az) + + hvsr_tSteps = None # Only used for DFA + + + return np.array(hvsr_curve), hvsr_azimuth, hvsr_tSteps + + + # Get HVSR + def __get_hvsr(_dbz, _db1, _db2, _x, azimuth=None, use_method=4): + """ Helper function to calculate H/V ratio + + _dbz : list + Two-item list with deciBel value of z component at either end of particular frequency step + _db1 : list + Two-item list with deciBel value of either e or n component (does not matter which) at either end of particular frequency step + _db2 : list + Two-item list with deciBel value of either e or n component (does not matter which) at either end of particular frequency step + _x : list + Two-item list containing frequency values at either end of frequency step of interest + use_method : int, default = 4 + H is computed based on the selected use_method; see: https://academic.oup.com/gji/article/194/2/936/597415 + use_method: + (1) Diffuse Field Assumption (DFA) + (2) arithmetic mean, that is, H ≡ (H_N + H_E)/2 + (3) geometric mean, that is, H ≡ √(H_N · H_E), recommended by the SESAME project (2004) + (4) vector summation, that is, H ≡ √(H_N² + H_E²) + (5) quadratic mean, that is, H ≡ √((H_N² + H_E²)/2) + (6) maximum horizontal value, that is, H ≡ max{H_N, H_E} + """ + + _pz = __get_power(_dbz, _x) + _p1 = __get_power(_db1, _x) + + _hz = math.sqrt(_pz) + _h1 = math.sqrt(_p1) + + if _db2 is None: + _p2 = 1 + _h2 = 1 + else: + _p2 = __get_power(_db2, _x) + _h2 = math.sqrt(_p2) + + def az_calc(az, h1, h2): + if az is None: + az = 90 + az_rad = np.deg2rad(az) + return np.add(h2 * np.cos(az_rad), h1 * np.sin(az_rad)) + + _h = { 2: (_h1 + _h2) / 2.0, # Arithmetic mean + 3: math.sqrt(_h1 * _h2), # Geometric mean + 4: math.sqrt(_p1 + _p2), # Vector summation + 5: math.sqrt((_p1 + _p2) / 2.0), # Quadratic mean + 6: max(_h1, _h2), # Max horizontal value + 7: min(_h1, _h2), # Minimum horizontal value + 8: az_calc(azimuth, _h1, _h2), + 'az': _h1} # If azimuth, horizontals are already combined, no _h2 + + _hvsr = _h[use_method] / _hz + return _hvsr + + + # For converting dB scaled data to power units + def __get_power(_db, _x): + """Calculate power for HVSR + + #FROM ORIGINAL (I think this is only step 6) + Undo deciBel calculations as outlined below: + 1. Dividing the window into 13 segments having 75% overlap + 2. For each segment: + 2.1 Removing the trend and mean + 2.2 Apply a 10% sine taper + 2.3 FFT + 3. Calculate the normalized PSD + 4. Average the 13 PSDs & scale to compensate for tapering + 5. Frequency-smooth the averaged PSD over 1-octave intervals at 1/8-octave increments + 6.
Convert power to decibels + #END FROM ORIGINAL + + Parameters + ---------- + _db : list + Two-item list with individual power values in decibels for specified freq step. + _x : list + Two-item list with individual x values (either frequency or period) + + Returns + ------- + _p : float + Individual power value, converted from decibels + + NOTE + ---- + PSD is equal to the power divided by the width of the bin + PSD = P / W + log(PSD) = log(P) - log(W) + log(P) = log(PSD) + log(W), where W is width in frequency + log(P) = log(PSD) - log(Wt), where Wt is width in period + + for each bin perform rectangular integration to compute power + power is assigned to the point at the beginning of the interval + _ _ + | |_| | + |_|_|_| + + Here we are computing power for individual points, so no integration is necessary; just + compute the area. + """ + _dx = abs(np.diff(_x)[0]) + _p = np.multiply(np.mean(__remove_db(_db)), _dx) + return _p + + + # Remove decibel scaling + def __remove_db(_db_value): + """convert dB power to power""" + _values = list() + for _d in _db_value: + _values.append(10 ** (float(_d) / 10.0)) + #FIX THIS + if _values[1]==0: + _values[1]=10e-300 + return _values + + + # Find peaks in the hvsr curve + def __find_peaks(_y): + """Finds all possible peaks on hvsr curves + Parameters + ---------- + _y : list or array + _y input is list or array of a curve. + In this case, this is either main hvsr curve or individual time step curves + """ + _index_list = scipy.signal.argrelextrema(np.array(_y), np.greater) + + return _index_list[0] + + + # Get additional HVSR params for later calculations + def __gethvsrparams(hvsr_out): + """Private function to get HVSR parameters for later calculations (things like standard deviation, etc)""" + + hvsrp2 = {} + hvsrm2 = {} + + hvsrp2=[] + hvsrm=[] + + hvsr_log_std = {} + + hvsr = hvsr_out['hvsr_curve'] + hvsr_az = hvsr_out['hvsr_az'] + hvsrDF = hvsr_out['hvsr_windows_df'] + + if len(hvsr_out['ind_hvsr_curves'].keys()) > 0: + # With arrays, original way of doing it + hvsr_log_std = {} + for k in hvsr_out['ind_hvsr_curves'].keys(): + hvsr_log_std[k] = np.nanstd(np.log10(hvsr_out['ind_hvsr_curves'][k]), axis=0) + + #With dataframe, updated way to use DF for all time-step tasks, still testing + logStackedata = {} + hvsrp = {} + hvsrm = {} + hvsrp2 = {} + hvsrm2 = {} + hvsr_log_std = {} + for col_name in hvsr_out['hvsr_windows_df'].columns: + if col_name.startswith("HV_Curves"): + if col_name == 'HV_Curves': + colSuffix = '_HV' + colID = 'HV' + else: + colSuffix = '_'+'_'.join(col_name.split('_')[2:]) + colID = colSuffix.split('_')[1] + stackedData = np.stack(hvsr_out['hvsr_windows_df'][col_name]) + + logStackedata = np.log10(stackedData).tolist() + for i, r in enumerate(logStackedata): + logStackedata[i] = np.array(r) + + hvsr_out['hvsr_windows_df']['Log10_HV_Curves'+colSuffix] = logStackedata + hvsr_log_std[colID] = np.nanstd(np.stack(hvsr_out['hvsr_windows_df']['Log10_HV_Curves'+colSuffix][hvsrDF['Use']]), axis=0) + + #The components are already calculated and aren't calculated at the time-step level, so they don't need to be recalculated here + hvsrp[colID] = np.add(hvsr_out['hvsr_curve'], hvsr_out['ind_hvsr_stdDev'][colID]) + hvsrm[colID] = np.subtract(hvsr_out['hvsr_curve'], hvsr_out['ind_hvsr_stdDev'][colID]) + for k in hvsr_out['hvsr_az'].keys(): + hvsrp[colID] = np.add(hvsr_out['hvsr_az'][k], hvsr_out['ind_hvsr_stdDev'][colID]) + hvsrm[colID] = np.subtract(hvsr_out['hvsr_az'][k], hvsr_out['ind_hvsr_stdDev'][colID]) + hvsrp2[colID] = np.multiply(hvsr, np.exp(hvsr_log_std[colID])) +
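The hvsrp2/hvsrm2 curves on either side of this point are multiplicative standard-deviation bounds: the deviation is computed on the log of the per-window H/V curves and then exponentiated. A standalone toy version (all values synthetic; it deliberately mirrors the base-10/base-e mixing of np.log10 and np.exp used here rather than correcting it):

    import numpy as np

    # Toy stack of per-window H/V curves: 20 time windows x 128 frequencies
    rng = np.random.default_rng(0)
    curves = rng.lognormal(mean=0.5, sigma=0.3, size=(20, 128))
    mean_curve = curves.mean(axis=0)

    log_std = np.nanstd(np.log10(curves), axis=0)  # std of log10(H/V), as above
    upper = mean_curve * np.exp(log_std)           # analogous to hvsrp2
    lower = mean_curve / np.exp(log_std)           # analogous to hvsrm2

Because the deviation is applied multiplicatively, the bounds stay positive and symmetric in log space, which suits ratio data like H/V better than the additive hvsrp/hvsrm bounds.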
hvsrm2[colID] = np.divide(hvsr, np.exp(hvsr_log_std[colID])) + + newKeys = ['hvsr_log_std', 'hvsrp','hvsrm', 'hvsrp2','hvsrm2'] + newVals = [hvsr_log_std, hvsrp, hvsrm, hvsrp2, hvsrm2] + for i, nk in enumerate(newKeys): + if nk not in hvsr_out.keys(): + hvsr_out[nk] = {} + hvsr_out[nk][colID] = np.array(newVals[i][colID]) + + return hvsr_out + + + # HELPER FUNCTIONS FOR GET REPORT + # Private function to generate print report + def _generate_print_report(hvsr_results, azimuth="HV", show_print_report=True, verbose=False): + """Helper function to create a printed (monospace) report with summary data for an HVSR site + + Parameters + ---------- + hvsr_results : HVSRData object + HVSRData object with data to be reported on + show_print_report : bool, optional + Whether to print the report to the terminal, by default True + + Returns + ------- + HVSRData object + HVSRData object with the ["Print_Report"] attribute created or updated. + The .Print_Report attribute is a formatted string that can be + displayed using print(hvsr_results['Print_Report']) and contains a summary of the HVSR results + """ + #Print results + + #Make separators for nicely formatted print output + sepLen = 99 + siteSepSymbol = '=' + intSepSymbol = u"\u2012" + extSepSymbol = u"\u2014" + + if sepLen % 2 == 0: + remainVal = 1 + else: + remainVal = 0 + + siteWhitespace = 2 + #Format the separator lines internal to each site + internalSeparator = intSepSymbol.center(sepLen-4, intSepSymbol).center(sepLen, ' ') + + extSiteSeparator = "".center(sepLen, extSepSymbol) + siteSeparator = f"{hvsr_results['input_params']['site']}".center(sepLen - siteWhitespace, ' ').center(sepLen, siteSepSymbol) + endSiteSeparator = "".center(sepLen, siteSepSymbol) + + #Start building list to print + report_string_list = [] + report_string_list.append("") #Blank line to start + report_string_list.append(extSiteSeparator) + report_string_list.append(siteSeparator) + report_string_list.append(extSiteSeparator) + #report_string_list.append(internalSeparator) + report_string_list.append('') + report_string_list.append(f"\tSite Name: {hvsr_results['input_params']['site']}") + report_string_list.append(f"\tAcq. Date: {hvsr_results['input_params']['acq_date']}") + report_string_list.append(f"\tLocation : {hvsr_results['input_params']['longitude']}, {hvsr_results['input_params']['latitude']}") + report_string_list.append(f"\tElevation: {hvsr_results['input_params']['elevation']}") + report_string_list.append('') + report_string_list.append(internalSeparator) + report_string_list.append('') + if 'BestPeak' not in hvsr_results.keys(): + report_string_list.append('\tNo identifiable BestPeak was present between {} for {}'.format(hvsr_results['input_params']['hvsr_band'], hvsr_results['input_params']['site'])) + else: + curvTestsPassed = (hvsr_results['BestPeak'][azimuth]['PassList']['WinLen'] + + hvsr_results['BestPeak'][azimuth]['PassList']['SigCycles']+ + hvsr_results['BestPeak'][azimuth]['PassList']['LowCurveStD']) + curvePass = curvTestsPassed > 2 + + #Peak Pass?
+ peakTestsPassed = ( hvsr_results['BestPeak'][azimuth]['PassList']['ProminenceLow'] + + hvsr_results['BestPeak'][azimuth]['PassList']['ProminenceHi']+ + hvsr_results['BestPeak'][azimuth]['PassList']['AmpClarity']+ + hvsr_results['BestPeak'][azimuth]['PassList']['FreqStability']+ + hvsr_results['BestPeak'][azimuth]['PassList']['LowStDev_Freq']+ + hvsr_results['BestPeak'][azimuth]['PassList']['LowStDev_Amp']) + peakPass = peakTestsPassed >= 5 + + report_string_list.append('\t{0:.3f} Hz Peak Frequency ± {1:.4f} Hz'.format(hvsr_results['BestPeak'][azimuth]['f0'], float(hvsr_results["BestPeak"][azimuth]['Sf']))) + if curvePass and peakPass: + report_string_list.append('\t {} Peak at {} Hz passed quality checks! :D'.format(sprit_utils.check_mark(), round(hvsr_results['BestPeak'][azimuth]['f0'],3))) + else: + report_string_list.append('\t {} Peak at {} Hz did NOT pass quality checks :('.format(sprit_utils.x_mark(), round(hvsr_results['BestPeak'][azimuth]['f0'],3))) + report_string_list.append('') + report_string_list.append(internalSeparator) + report_string_list.append('') + + justSize=34 + #Print individual results + report_string_list.append('\tCurve Tests: {}/3 passed (3/3 needed)'.format(curvTestsPassed)) + report_string_list.append(f"\t\t {hvsr_results['BestPeak'][azimuth]['Report']['Lw'][-1]}"+" Length of processing windows".ljust(justSize)+f"{hvsr_results['BestPeak'][azimuth]['Report']['Lw']}") + report_string_list.append(f"\t\t {hvsr_results['BestPeak'][azimuth]['Report']['Nc'][-1]}"+" Number of significant cycles".ljust(justSize)+f"{hvsr_results['BestPeak'][azimuth]['Report']['Nc']}") + report_string_list.append(f"\t\t {hvsr_results['BestPeak'][azimuth]['Report']['σ_A(f)'][-1]}"+" Small H/V StDev over time".ljust(justSize)+f"{hvsr_results['BestPeak'][azimuth]['Report']['σ_A(f)']}") + + report_string_list.append('') + report_string_list.append("\tPeak Tests: {}/6 passed (5/6 needed)".format(peakTestsPassed)) + report_string_list.append(f"\t\t {hvsr_results['BestPeak'][azimuth]['Report']['A(f-)'][-1]}"+" Peak is prominent below".ljust(justSize)+f"{hvsr_results['BestPeak'][azimuth]['Report']['A(f-)']}") + report_string_list.append(f"\t\t {hvsr_results['BestPeak'][azimuth]['Report']['A(f+)'][-1]}"+" Peak is prominent above".ljust(justSize)+f"{hvsr_results['BestPeak'][azimuth]['Report']['A(f+)']}") + report_string_list.append(f"\t\t {hvsr_results['BestPeak'][azimuth]['Report']['A0'][-1]}"+" Peak is large".ljust(justSize)+f"{hvsr_results['BestPeak'][azimuth]['Report']['A0']}") + if hvsr_results['BestPeak'][azimuth]['PassList']['FreqStability']: + res = sprit_utils.check_mark() + else: + res = sprit_utils.x_mark() + report_string_list.append(f"\t\t {res}"+ " Peak freq. is stable over time".ljust(justSize)+ f"{hvsr_results['BestPeak'][azimuth]['Report']['P-'][:5]} and {hvsr_results['BestPeak'][azimuth]['Report']['P+'][:-1]} {res}") + report_string_list.append(f"\t\t {hvsr_results['BestPeak'][azimuth]['Report']['Sf'][-1]}"+" Stability of peak (Freq. StDev)".ljust(justSize)+f"{hvsr_results['BestPeak'][azimuth]['Report']['Sf']}") + report_string_list.append(f"\t\t {hvsr_results['BestPeak'][azimuth]['Report']['Sa'][-1]}"+" Stability of peak (Amp. 
StDev)".ljust(justSize)+f"{hvsr_results['BestPeak'][azimuth]['Report']['Sa']}") + report_string_list.append('') + report_string_list.append(f"Calculated using {hvsr_results['hvsr_windows_df']['Use'].astype(bool).sum()}/{hvsr_results['hvsr_windows_df']['Use'].count()} time windows".rjust(sepLen-1)) + report_string_list.append(extSiteSeparator) + #report_string_list.append(endSiteSeparator) + #report_string_list.append(extSiteSeparator) + report_string_list.append('') + + reportStr='' + #Now print it + for line in report_string_list: + reportStr = reportStr+'\n'+line + + if show_print_report or verbose: + print(reportStr) + + hvsr_results['BestPeak'][azimuth]['Report']['Print_Report'] = reportStr + if azimuth=='HV': + hvsr_results['Print_Report'] = reportStr + return hvsr_results + + +# Private function to generate table report +def _generate_table_report(hvsr_results, azimuth='HV', show_table_report=True, verbose=False): + """Helper function for get_report() to generate a site report formatted into a pandas dataframe + + Parameters + ---------- + hvsr_results : HVSRData + HVSRData object containing information about which the report will be generated. + azimuth : str, optional + The azimuth for which this report will be generated. If none specified/calculated, by default 'HV' + show_table_report : bool, optional + Whether to print the table report (as text) to the terminal + verbose : bool, optional + Whether or not to print information about the table report generation (including the pandas dataframe upon creation) to the terminal, by default False + + + Returns + ------- + HVSRData + An HVSRData object with the ["Table_Report"] attribute created/updated. + This is a pandas.DataFrame instance, but can be exported to csv. + """ + + coord0Dir = hvsr_results['input_params']['output_crs'].axis_info[0].direction + + # Figure out which coordinate axis is which (some CRS do Y, X) + if coord0Dir.lower() in ['north', 'south']: + xaxisinfo = hvsr_results['input_params']['output_crs'].axis_info[1] + yaxisinfo = hvsr_results['input_params']['output_crs'].axis_info[0] + else: + xaxisinfo = hvsr_results['input_params']['output_crs'].axis_info[0] + yaxisinfo = hvsr_results['input_params']['output_crs'].axis_info[1] + + # Get the axis name + xaxis_name = xaxisinfo.name + yaxis_name = yaxisinfo.name + + # Simplify the axis name + if 'longitude' in xaxis_name.lower(): + xaxis_name = 'Longitude' + if 'latitude' in yaxis_name.lower(): + yaxis_name = 'Latitude' + + pdCols = ['Site Name', 'Acq_Date', xaxis_name, yaxis_name, 'Elevation', 'Peak', 'Peak_StDev', + 'PeakPasses','WinLen','SigCycles','LowCurveStD', + 'ProminenceLow','ProminenceHi','AmpClarity','FreqStability', 'LowStDev_Freq','LowStDev_Amp'] + d = hvsr_results + criteriaList = [] + criteriaList.append(hvsr_results['BestPeak'][azimuth]["PeakPasses"]) + for p in hvsr_results['BestPeak'][azimuth]["PassList"]: + criteriaList.append(hvsr_results['BestPeak'][azimuth]["PassList"][p]) + dfList = [[d['input_params']['site'], d['input_params']['acq_date'], d['input_params']['xcoord'], d['input_params']['ycoord'], d['input_params']['elevation'], round(d['BestPeak'][azimuth]['f0'], 3), round(d['BestPeak'][azimuth]['Sf'], 4)]] + dfList[0].extend(criteriaList) + + outDF = pd.DataFrame(dfList, columns=pdCols) + outDF.index.name = 'ID' + + if show_table_report or verbose: + print('\nTable Report:\n') + maxColWidth = 13 + print(' ', end='') + for col in outDF.columns: + if len(str(col)) > maxColWidth: + colStr = str(col)[:maxColWidth-3]+'...' 
+ else: + colStr = str(col) + print(colStr.ljust(maxColWidth), end=' ') + print() #new line + for c in range(len(outDF.columns) * (maxColWidth+2)): + if c % (maxColWidth+2) == 0: + print('|', end='') + else: + print('-', end='') + print('|') #new line + print(' ', end='') #Small indent at start + for row in outDF.iterrows(): + for col in row[1]: + if len(str(col)) > maxColWidth: + colStr = str(col)[:maxColWidth-3]+'...' + else: + colStr = str(col) + print(colStr.ljust(maxColWidth), end=' ') + print() + + hvsr_results['BestPeak'][azimuth]['Report']['Table_Report'] = outDF + if azimuth=='HV': + hvsr_results['Table_Report'] = outDF + return hvsr_results + + + # Display html report using a temporary file + def _display_html_report(html_report): + import platform + import tempfile + import time + import webbrowser + + autodelete = platform.system() != "Windows" + + with tempfile.NamedTemporaryFile(mode="w", delete=autodelete, suffix=".html") as tmp_file: + tmp_file.write(html_report) + file_path = tmp_file.name + file_path = file_path.replace('\\'[0], '/') + rawfpath = file_path + print(rawfpath) + + if autodelete: + client = webbrowser + if not file_path.startswith("file:///"): + file_path = f"file:///{file_path}" + client.open_new(file_path) + # Adding a short sleep so that the file does not get cleaned + # up immediately in case the browser takes a while to boot. + time.sleep(3) + + if not autodelete: + client = webbrowser + if not file_path.startswith("file:///"): + file_path = f"file:///{file_path}" + client.open_new(file_path) + + time.sleep(3) + os.unlink(rawfpath) # Cleaning up the file in case of Windows + + + # Private function for html report generation + def _generate_html_report(hvsr_results, show_html_report=False, verbose=False): + """Private function that generates html report, intended to be used by the get_report() public function + + Parameters + ---------- + hvsr_results : HVSRData or HVSRBatch + Input data from which to generate report + show_html_report : bool, optional + Whether to show the report or simply generate and save it in the "HTML_Report" attribute of hvsr_results, by default False + verbose : bool, optional + Whether to print information about the HTML report generation to terminal + + Returns + ------- + HVSRData or HVSRBatch + Returns the input dataset, with the HTML_Report attribute updated with the html text of the report + """ + resources_dir = pathlib.Path(pkg_resources.resource_filename(__name__, 'resources/')) + htmlTemplatePath = resources_dir.joinpath('html_report_template.html') + + with open(htmlTemplatePath, 'r') as htmlF: + html = htmlF.read() + + # Update report title (site name) + html = html.replace("HVSR_REPORT_TITLE", hvsr_results['site']) + + # Update peak freq info + html = html.replace("PEAKFREQ", str(round(hvsr_results['BestPeak']['HV']['f0'], 3))) + html = html.replace("PEAKSTDEV", str(round(hvsr_results['BestPeak']['HV']['Sf'], 3))) + + if hvsr_results.Table_Report['PeakPasses'][0]: + html = html.replace("SESAME_TESTS_RESULTS", 'Peak has passed the SESAME validation tests.') + else: + html = html.replace("SESAME_TESTS_RESULTS", 'Peak did not pass the SESAME validation tests.') + + # Update image source + # Save the plot to a BytesIO object + # Default to matplotlib object + plotEngine = 'matplotlib' + if 'get_report' in hvsr_results.processing_parameters: + plotEngine = hvsr_results.processing_parameters['get_report']['plot_engine'].lower() + + if plotEngine not in ['plotly', 'plty', 'p']: + # Create a byte stream from the image + buf
= io.BytesIO() + plt.savefig(buf, format='png') + buf.seek(0) + + # Encode the image to base64 + hvplot_base64 = base64.b64encode(buf.read()).decode('utf-8') + # Embed the image in the html document + html = html.replace("./output.png", f'data:image/png;base64,{hvplot_base64}') + else: + + img = plotly.io.to_image(hvsr_results.HV_Plot, format='png', engine='auto') + hvplot_base64 = base64.b64encode(img).decode('utf8') + + html = html.replace("./output.png", f'data:image/png;base64,{hvplot_base64}') + + # Update formatting for print report for html + html_print_report = hvsr_results.Print_Report.replace('\n', '<br>').replace('\t', "&nbsp;&nbsp;&nbsp;&nbsp;") + html_print_report = html_print_report.replace('<br>', '', 2) #Remove the first two breaks + html_print_report = html_print_report.replace('✔', '&#10004;') + html_print_report = html_print_report.replace('✘', '&cross;') + + majorSepLine = u"\u2014"*99 + minorSepLine = u"\u2012"*95 + majorSepLineHTML = '&mdash;'*40 + minorSepLineHTML = '&mdash;&nbsp;'*25 + + startInd = html_print_report.index('&nbsp;&nbsp;&nbsp;&nbsp;Site Name:') + html_print_report = "<br><br>" + html_print_report[startInd:] + lastInd = html_print_report.index(majorSepLine) + html_print_report = html_print_report[:lastInd] + + html_print_report = html_print_report.replace(majorSepLine, majorSepLineHTML) # Replace the major separator lines + html_print_report = html_print_report.replace(minorSepLine, minorSepLineHTML) # Replace the minor separator lines + html_print_report = html_print_report.replace("=", '') # Get rid of = + + html = html.replace('HVSR_PRINT_REPORT', html_print_report) + + # Update table + htmlTable = hvsr_results.Table_Report.iloc[:,2:] + for i in range(len(htmlTable.columns)): + tableHeader = htmlTable.columns[i] + #html = html.replace(f"TableHeader_{str(i).zfill(2)}", tableHeader) + + tableValue = htmlTable.iloc[:,i][0] + html = html.replace(f"TableData_{str(i).zfill(2)}", str(tableValue)) + + coord0Dir = hvsr_results['input_params']['output_crs'].axis_info[0].direction + + # Figure out which coordinate axis is which (some CRS do Y, X) + if coord0Dir.lower() in ['north', 'south']: + xaxisinfo = hvsr_results['input_params']['output_crs'].axis_info[1] + yaxisinfo = hvsr_results['input_params']['output_crs'].axis_info[0] + else: + xaxisinfo = hvsr_results['input_params']['output_crs'].axis_info[0] + yaxisinfo = hvsr_results['input_params']['output_crs'].axis_info[1] + + # Get the axis name + xaxis_name = xaxisinfo.name + yaxis_name = yaxisinfo.name + + # Simplify the axis name + if 'longitude' in xaxis_name.lower(): + xaxis_name = 'Longitude' + if 'latitude' in yaxis_name.lower(): + yaxis_name = 'Latitude' + + + html = html.replace("X_Coordinate", xaxis_name) + html = html.replace("Y_Coordinate", yaxis_name) + + html = html.replace("Deg_E", xaxisinfo.unit_name) + html = html.replace("Deg_N", yaxisinfo.unit_name) + + hvsr_results['HTML_Report'] = html + + # View in browser, if indicated to + if show_html_report: + try: + _display_html_report(html) + except Exception as e: + print('\tHTML Report could not be displayed, but has been saved to the .HTML_Report attribute') + print(e) + + return hvsr_results + + + # Private/Helper function to generate pdf report + def _generate_pdf_report(hvsr_results, pdf_report_filepath=None, show_pdf_report=False, show_html_report=False, verbose=False): + """Private/helper function to generate pdf report from HTML report, intended to be used by get_report() function + + Parameters + ----------
+ hvsr_results : HVSRData or HVSRBatch + Input dataset with all processing already carried out + show_pdf_report : bool, optional + EXPERIMENTAL: Whether to open the report after generating it, by default False + show_html_report : bool, optional + Whether to open the html report that the pdf report is based on, by default False + verbose : bool, optional + Whether to print verbose description of what the function is doing + """ + from xhtml2pdf import pisa + + # Generate HTML Report if not already (this will be converted to pdf using xhtml2pdf) + if not hasattr(hvsr_results, "HTML_Report"): + hvsr_results = _generate_html_report(hvsr_results, show_html_report=show_html_report) + if verbose: + print('\tNo HTML Report previously generated, attempting now.') + # Try to generate HTML report from template + + htmlReport = hvsr_results['HTML_Report'] + + if pdf_report_filepath is None: + if verbose: + print('\t pdf_report_filepath not specified, saving to temporary file.') + with tempfile.NamedTemporaryFile(delete=False, suffix='.pdf') as temp_file: + pdf_export_path = temp_file.name # Get the name of the temporary file + + # Now, open the file again for writing + with open(pdf_export_path, 'wb') as temp_file: + pisa_status = pisa.CreatePDF(htmlReport, dest=temp_file) + + else: + if pathlib.Path(pdf_report_filepath).is_dir(): + fname = f"REPORT_{hvsr_results['site']}_{hvsr_results['hvsr_id']}.pdf" + pdf_report_filepath = pathlib.Path(pdf_report_filepath).joinpath(fname) + + try: + with open(pdf_report_filepath, "w+b") as export_file: + pisa_status = pisa.CreatePDF(htmlReport, dest=export_file) + pdf_export_path = pdf_report_filepath + if verbose: + print(f'PDF report saved to {pdf_export_path}') + except Exception as e: + print(f'PDF could not be saved to {pdf_report_filepath}') + if verbose: + print(f'\t{e}') + + with tempfile.NamedTemporaryFile(delete=False, suffix='.pdf') as temp_file: + pdf_export_path = temp_file.name # Get the name of the temporary file + print(f'Saving pdf to temporary file instead: {temp_file.name}') + # Now, open the file again for writing + with open(pdf_export_path, 'wb') as temp_file: + pisa_status = pisa.CreatePDF(htmlReport, dest=temp_file) + + + if verbose: + if not str(pisa_status.err) == '0': + print('\t', pisa_status.err) + + if show_html_report: + _display_html_report(hvsr_results['HTML_Report']) + + if show_pdf_report: + if verbose: + print(f'\tAttempting to open pdf at {pdf_export_path}') + + print('\t**Opening pdfs with the show_pdf_report or show_report parameter is experimental**') + + try: + os.startfile(pdf_export_path) + except Exception as e: + print(f"\tSpRIT cannot open your pdf report, but it has been saved to {pdf_export_path}") + print('\tAttempting to open HTML version of report') + try: + _display_html_report(hvsr_results['HTML_Report']) + except Exception as e: + print('\tHTML Report could not be displayed, but has been saved to the .HTML_Report attribute') + + return hvsr_results + + # Plot hvsr curve, private supporting function for plot_hvsr + def _plot_hvsr(hvsr_data, plot_type, xtype='frequency', fig=None, ax=None, azimuth='HV', save_dir=None, save_suffix='', show_plot=True, **kwargs): + """Private function for plotting hvsr curve (or curves with components) + """ + if 'kwargs' in kwargs.keys(): + kwargs = kwargs['kwargs'] + + if fig is None and ax is None: + fig, ax = plt.subplots() + + if 'xlim' not in kwargs.keys(): + xlim = hvsr_data['hvsr_band'] + else: + xlim = kwargs['xlim'] + + if 'ylim' not in kwargs.keys(): + ylim = [0,
max(hvsr_data['hvsrp2'][azimuth])*1.05] + if ylim[1] > 25: + ylim = [0, max(hvsr_data['hvsr_curve']+1)] + else: + ylim = kwargs['ylim'] + + if 'grid' in kwargs.keys(): + plt.grid(which=kwargs['grid'], alpha=0.25) + + hvsrDF = hvsr_data.hvsr_windows_df + + freqList = ['x_freqs', 'freqs', 'freq', 'hz', 'f', 'frequency'] + if xtype.lower() in freqList: + xlabel = 'Frequency [Hz]' + else: + xlabel = 'Period [s]' + + if save_dir is not None: + filename = hvsr_data['input_params']['site'] + else: + filename = "" + + anyKey = list(hvsr_data[xtype].keys())[0] + x = hvsr_data[xtype][anyKey][:-1] + y = hvsr_data['hvsr_curve'] + + plotSuff = '' + legendLoc = 'upper left' + + plotHVSR = False + for item in plot_type: + if item.lower()=='hvsr': + plotHVSR = True + ax.plot(x, y, color='k', label='H/V Ratio', zorder=1000) + plotSuff = 'HVSRCurve_' + if '-s' not in plot_type: + ax.fill_between(x, hvsr_data['hvsrm2'][azimuth], hvsr_data['hvsrp2'][azimuth], color='k', alpha=0.2, label='StDev',zorder=997) + ax.plot(x, hvsr_data['hvsrm2'][azimuth], color='k', alpha=0.25, linewidth=0.5, zorder=998) + ax.plot(x, hvsr_data['hvsrp2'][azimuth], color='k', alpha=0.25, linewidth=0.5, zorder=999) + else: + plotSuff = plotSuff+'noStdDev_' + break + + ax.semilogx() + ax.set_ylim(ylim) + ax.set_xlim(xlim) + ax.set_ylabel('H/V Ratio'+'\n['+hvsr_data['horizontal_method']+']', fontsize='small',) + ax.tick_params(axis='x', labelsize=8) + ax.tick_params(axis='y', labelsize=5) + plt.suptitle(hvsr_data['input_params']['site']) + + if "BestPeak" in hvsr_data.keys(): + f0 = hvsr_data['BestPeak'][azimuth]['f0'] + a0 = hvsr_data['BestPeak'][azimuth]['A0'] + else: + f0 = hvsr_data['hvsr_band'][0] + a0 = 0 + f0_div4 = f0/4 + f0_mult4 = f0*4 + a0_div2 = a0/2 + + # Predefine so only need to set True if True + peakAmpAnn = False + peakPoint = False + peakLine = False + used = hvsrDF['Use'].astype(bool) + notused = ~hvsrDF['Use'].astype(bool) + + for k in plot_type: + + # Show peak(s) + # Show f0 peak (and annotate if indicated) + if k=='p' and 'all' not in plot_type: + plotSuff=plotSuff+'BestPeak_' + + bestPeakScore = 0 + for i, p in enumerate(hvsr_data['PeakReport'][azimuth]): + if p['Score'] > bestPeakScore: + bestPeakScore = p['Score'] + bestPeak = p + + ax.axvline(bestPeak['f0'], color='k', linestyle='dotted', label='Peak') + + # Annotate primary peak + if 'ann' in plot_type: + xLoc = bestPeak['f0'] + yLoc = ylim[0] + (ylim[1] - ylim[0]) * 0.008 + ax.text(x=xLoc, y=yLoc, s="Peak at "+str(round(bestPeak['f0'],2))+'Hz', + fontsize='xx-small', horizontalalignment='center', verticalalignment='bottom', + bbox=dict(facecolor='w', edgecolor='none', alpha=0.8, pad=0.1)) + plotSuff = plotSuff+'ann_' + #Show all peaks in h/v curve + elif k=='p' and 'all' in plot_type: + plotSuff = plotSuff+'allPeaks_' + + ax.vlines(hvsr_data['hvsr_peak_freqs'][azimuth], ax.get_ylim()[0], ax.get_ylim()[1], colors='k', linestyles='dotted', label='Peak') + + # Annotate all peaks + if 'ann' in plot_type: + for i, p in enumerate(hvsr_data['hvsr_peak_freqs']): + y = hvsr_data['hvsr_curve'][hvsr_data['hvsr_peak_indices'][i]] + ax.annotate('Peak at '+str(round(p,2))+'Hz', (p, 0.1), xycoords='data', + horizontalalignment='center', verticalalignment='bottom', + bbox=dict(facecolor='w', edgecolor='none', alpha=0.8, pad=0.1)) + plotSuff=plotSuff+'ann_' + + # Show primary peak amplitude (and annotate if indicated) + if k=='pa': + ax.hlines([a0], ax.get_xlim()[0], f0, linestyles='dashed') + ax.scatter([f0], [a0], marker="o", facecolor='none', edgecolor='k') + peakPoint = 
True + peakLine = True + + # Annotate primary peak amplitude + if 'ann' in plot_type: + ax.annotate(f"Peak Amp.: {a0:.2f}", [f0+0.1*f0, a0]) + peakAmpAnn = True + + # Show the curves and/or peaks at each time window + if 't' in k and 'test' not in k: + plotSuff = plotSuff+'allTWinCurves_' + + # If this is a component subplot + if kwargs['subplot'] == 'comp': + + if k == 'tp': + pass # This is not calculated for individual components + if k == 't': + azKeys = ['Z', 'E', 'N'] + azKeys.extend(list(hvsr_data.hvsr_az.keys())) + azColors = {'Z':'k', 'E':'b', 'N':'r'} + for az in azKeys: + if az.upper() in azColors.keys(): + col = azColors[az] + else: + col = 'g' + + for pv, t in enumerate(np.stack(hvsrDF[used]['psd_values_'+az])): + ax.plot(x, t[:-1], color=col, alpha=0.2, linewidth=0.8, linestyle=':', zorder=0) + # For the main H/V plot + else: + # Show all peaks at all times (semitransparent red bars) + if k == 'tp': + for j, t in enumerate(hvsrDF[used]['CurvesPeakIndices_'+azimuth]): + for i, v in enumerate(t): + v= x[v] + if i==0: + width = (x[i+1]-x[i])/16 + else: + width = (x[i]-x[i-1])/16 + if j == 0 and i==0: + ax.fill_betweenx(ylim,v-width,v+width, color='r', alpha=0.05, label='Individual H/V Peaks') + else: + ax.fill_betweenx(ylim,v-width,v+width, color='r', alpha=0.05) + # Show curves at all time windows + if k == 't': + for t in np.stack(hvsrDF[used]['HV_Curves']): + ax.plot(x, t, color='k', alpha=0.25, linewidth=0.8, linestyle=':') + for t in np.stack(hvsrDF[notused]['HV_Curves']): + ax.plot(x, t, color='orangered', alpha=0.666, linewidth=0.8, linestyle=':', zorder=0) + + # Plot SESAME test results and thresholds on HVSR plot + if 'test' in k and kwargs['subplot'] == 'hvsr': + if k=='tests' or 'all' in k or ':' in k: + # Change k to pass all test plot conditions + k='test123456c' + + if '1' in k: + # Peak is higher than 2x lowest point in f0/4-f0 + # Plot the line threshold that the curve needs to cross + ax.plot([f0_div4, f0], [a0_div2, a0_div2], color='tab:blue', marker='|', linestyle='dashed') + + # Get minimum of curve in desired range + indexList=[] + fList = [] + for i, f in enumerate(hvsr_data.x_freqs['Z']): + if f >= f0_div4 and f <= f0: + indexList.append(i) + fList.append(f) + + newCurveList= [] + newFreqList = [] + for ind in indexList: + if ind < len(hvsr_data.hvsr_curve): + newFreqList.append(hvsr_data.x_freqs['Z'][ind]) + newCurveList.append(hvsr_data.hvsr_curve[ind]) + curveTestList = list(np.ones_like(newFreqList) * a0_div2) + + + # Plot line showing where test succeeds or not + if hvsr_data['BestPeak'][azimuth]['Report']['A(f-)'][-1] == sprit_utils.x_mark(): + lowf2 = float(hvsr_data['BestPeak'][azimuth]['Report']['A(f-)'].replace('Hz', '').replace('-', '').split()[-3]) + hif2 = float(hvsr_data['BestPeak'][azimuth]['Report']['A(f-)'].replace('Hz', '').replace('-', '').split()[-2]) + ym = float(hvsr_data['BestPeak'][azimuth]['Report']['A(f-)'].replace('Hz', '').replace('-', '').split()[3]) + yp = min(newCurveList) + ax.fill_betweenx(y=[ym, yp], x1=lowf2, x2=hif2, alpha=0.1, color='r') + else: + #fpass = float(hvsr_data['BestPeak'][azimuth]['Report']['A(f-)'].replace('Hz', '').replace('-', '').split()[3]) + #fpassAmp = float(hvsr_data['BestPeak'][azimuth]['Report']['A(f-)'].replace('Hz', '').replace('-', '').split()[5]) + ax.fill_between(newFreqList, y1=newCurveList, y2=curveTestList, where=np.array(newCurveList)<=a0_div2, color='g', alpha=0.2) + minF = newFreqList[np.argmin(newCurveList)] + minA = min(newCurveList) + ax.plot([minF, minF, minF], [0, minA, a0_div2], 
marker='.', color='g', linestyle='dotted') + + # Plot the Peak Point if not already + if not peakPoint: + ax.scatter([f0], [a0], marker="o", facecolor='none', edgecolor='k') + peakPoint=True + + # Annotate the Peak Amplitude if not already + if not peakAmpAnn and 'ann' in plot_type: + ax.annotate(f"Peak Amp.: {a0:.2f}", [f0+0.1*f0, a0]) + peakAmpAnn=True + + # Add peak line + if 'pa' not in plot_type and not peakLine: + ax.hlines([a0], ax.get_xlim()[0], f0, linestyles='dashed') + peakLine = True + if '2' in k: + # Peak is higher than 2x lowest point in f0-f0*4 + + # Plot the line threshold that the curve needs to cross + ax.plot([f0, f0_mult4], [a0_div2, a0_div2], color='tab:blue', marker='|', linestyle='dashed') + + + # Get minimum of curve in desired range + indexList=[] + fList = [] + for i, f in enumerate(hvsr_data.x_freqs['Z']): + if f >= f0 and f <= f0_mult4: + indexList.append(i) + fList.append(f) + + newCurveList= [] + newFreqList = [] + for ind in indexList: + if ind < len(hvsr_data.hvsr_curve): + newFreqList.append(hvsr_data.x_freqs['Z'][ind]) + newCurveList.append(hvsr_data.hvsr_curve[ind]) + curveTestList = list(np.ones_like(newFreqList) * a0_div2) + + if hvsr_data['BestPeak'][azimuth]['Report']['A(f+)'][-1] == sprit_utils.x_mark(): + lowf2 = float(hvsr_data['BestPeak'][azimuth]['Report']['A(f+)'].replace('Hz', '').replace('-', '').split()[-3]) + hif2 = float(hvsr_data['BestPeak'][azimuth]['Report']['A(f+)'].replace('Hz', '').replace('-', '').split()[-2]) + ym = float(hvsr_data['BestPeak'][azimuth]['Report']['A(f+)'].replace('Hz', '').replace('-', '').split()[3]) + yp = min(newCurveList) + ax.fill_betweenx(y=[ym, yp], x1=lowf2, x2=hif2, alpha=0.1, color='r') + else: + #fpass = float(hvsr_data['BestPeak'][azimuth]['Report']['A(f+)'].replace('Hz', '').replace('-', '').split()[3]) + #fpassAmp = float(hvsr_data['BestPeak'][azimuth]['Report']['A(f+)'].replace('Hz', '').replace('-', '').split()[5]) + ax.fill_between(newFreqList, y1=newCurveList, y2=curveTestList, where=np.array(newCurveList)<=a0_div2, color='g', alpha=0.2) + minF = newFreqList[np.argmin(newCurveList)] + minA = min(newCurveList) + ax.plot([minF, minF, minF], [0, minA, a0_div2], marker='.', color='g', linestyle='dotted') + + # Plot the Peak Point if not already + if not peakPoint: + ax.scatter([f0], [a0], marker="o", facecolor='none', edgecolor='k') + peakPoint=True + + # Annotate the amplitude of peak point if not already + if not peakAmpAnn and 'ann' in plot_type: + ax.annotate(f"Peak Amp.: {a0:.2f}", [f0+0.1*f0, a0]) + peakAmpAnn=True + + if 'pa' not in plot_type and not peakLine: + ax.hlines([a0], ax.get_xlim()[0], f0, linestyles='dashed') + peakLine = True + if '3' in k: + if 'c' in k: + #Plot curve test3 + lowfc3 = hvsr_data['BestPeak'][azimuth]['Report']['σ_A(f)'].split(' ')[4].split('-')[0] + hifc3 = hvsr_data['BestPeak'][azimuth]['Report']['σ_A(f)'].split(' ')[4].split('-')[1].replace('Hz', '') + pass # May not even finish this + + lcolor='r' + if f0 > 2: + lcolor='g' + + if 'c' not in k or all(num in k for num in ["1", "2", "3", "4", "5", "6"]): + ax.hlines([2], ax.get_xlim()[0], ax.get_xlim()[1], color='tab:blue', linestyles='dashed') + ax.plot([f0, f0], [2, a0], linestyle='dotted', color=lcolor) + + if 'pa' not in plot_type: + ax.hlines([a0], ax.get_xlim()[0], f0, linestyles='dashed') + ax.scatter([f0], [a0], marker="o", facecolor='none', edgecolor='k') + peakPoint = True + peakLine = True + if '4' in k: + lowf4 = float(hvsr_data['BestPeak'][azimuth]['Report']['P-'].split(' ')[0]) + hif4 = 
float(hvsr_data['BestPeak'][azimuth]['Report']['P+'].split(' ')[0]) + m2Max = hvsr_data.x_freqs["Z"][np.argmax(hvsr_data.hvsrm2)]#, np.max(hvsr_data.hvsrm2)) + p2Max = hvsr_data.x_freqs["Z"][np.argmax(hvsr_data.hvsrp2)]#, np.max(hvsr_data.hvsrp2)) + + # ax.vlines([f0*0.95, f0*1.05], [0,0], [ax.get_xlim()[1],ax.get_xlim()[1]]) + ax.fill_betweenx(np.linspace(0, ax.get_xlim()[1]), x1=f0*0.95, x2=f0*1.05, color='tab:blue', alpha=0.3) + + mcolor = 'r' + pcolor = 'r' + if hvsr_data['BestPeak'][azimuth]['Report']['P-'][-1] == sprit_utils.check_mark(): + mcolor='g' + if hvsr_data['BestPeak'][azimuth]['Report']['P+'][-1] == sprit_utils.check_mark(): + pcolor='g' + + print(lowf4, hif4) + + ax.scatter([lowf4, hif4], [np.max(hvsr_data.hvsrm2[azimuth]), np.max(hvsr_data.hvsrp2[azimuth])], c=[mcolor, pcolor], marker='x') + + if not peakPoint: + ax.scatter([f0], [a0], marker="o", facecolor='none', edgecolor='k') + peakPoint = True + if '5' in k: + sf = float(hvsr_data['BestPeak'][azimuth]['Report']['Sf'].split(' ')[4].strip('()')) + sfp = f0+sf + sfm = f0-sf + + sfLim = float(hvsr_data['BestPeak'][azimuth]['Report']['Sf'].split(' ')[-2]) + sfLimp = f0+sfLim + sfLimm = f0-sfLim + + if hvsr_data['BestPeak'][azimuth]['Report']['Sf'][-1] == sprit_utils.check_mark(): + xColor = 'g' + else: + xColor='r' + + ax.scatter([sfLimm, sfLimp], [a0, a0], marker='|', c='tab:blue') + ax.scatter([sfm, sfp], [a0, a0], marker='x', c=xColor) + ax.plot([sfLimm, sfLimp], [a0, a0], color='tab:blue') + if not peakPoint: + ax.scatter([f0], [a0], marker="o", facecolor='none', edgecolor='k') + peakPoint = True + if '6' in k: + sa = float(hvsr_data['BestPeak'][azimuth]['Report']['Sa'].split(' ')[4].strip('()')) + sap = a0+sa + sam = a0-sa + + saLim = float(hvsr_data['BestPeak'][azimuth]['Report']['Sa'].split(' ')[-2]) + saLimp = a0+saLim + saLimm = a0-saLim + + if hvsr_data['BestPeak'][azimuth]['Report']['Sa'][-1] == sprit_utils.check_mark(): + xColor = 'g' + else: + xColor='r' + + ax.scatter([f0, f0], [saLimm, saLimp], marker='_', c='tab:blue') + ax.scatter([f0, f0],[sam, sap], marker='x', c=xColor) + ax.plot([f0, f0],[saLimm, saLimp], color='tab:blue') + if not peakPoint: + ax.scatter([f0], [a0], marker="o", facecolor='none', edgecolor='k') + peakPoint = True + + # Plot frequency search range bars + if 'fr' in k: + lowPeakSearchThresh = hvsr_data.peak_freq_range[0] + hiPeakSearchThresh = hvsr_data.peak_freq_range[1] + + frStyleDict = {'linestyle':'dashed', 'facecolors':'#1B060544', 'edgecolors':'#000000'} + + ax.fill_betweenx(ylim, [xlim[0], xlim[0]],[lowPeakSearchThresh,lowPeakSearchThresh], **frStyleDict) + ax.fill_betweenx(ylim, [hiPeakSearchThresh, hiPeakSearchThresh],[xlim[1],xlim[1]], **frStyleDict) + + # Plot individual components + if 'c' in k and 'test' not in k: #Spectrogram uses a different function, so c is unique to the component plot flag + plotSuff = plotSuff+'IndComponents_' + + if 'c' not in plot_type[0]:#This is part of the hvsr axis + #fig.tight_layout() + axis2 = ax.twinx() + compAxis = axis2 + #axis2 = plt.gca() + #fig = plt.gcf() + compAxis.set_ylabel('Amplitude'+'\n[m2/s4/Hz] [dB]') + compAxis.set_facecolor([0,0,0,0]) + legendLoc2 = 'upper left' + else: + ax.set_title('') #Remove title + ax.sharex(kwargs['axes']['hvsr']) + compAxis = ax + legendLoc2 = 'upper right' + + minY = [] + maxY = [] + keyList = ['Z', 'E', 'N'] + for az in hvsr_data.hvsr_az.keys(): + keyList.append(az) + keyList.sort() + hvsrDF = hvsr_data.hvsr_windows_df + for key in keyList: + minY.append(hvsr_data['psd_values_tavg'][key].min()) + 
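The component overlay just above relies on matplotlib's twinx() so the H/V ratio and the component amplitudes can share one frequency axis with independent y-scales. A minimal standalone sketch of that pattern (curve shapes are made up purely for illustration):

    import numpy as np
    import matplotlib.pyplot as plt

    freqs = np.logspace(-0.4, 1.6, 128)
    hv = 1 + 3 * np.exp(-((np.log10(freqs) - 0.3) ** 2) / 0.02)  # toy H/V curve
    comp = -140 + 10 * np.log10(freqs)                           # toy PSD, in dB

    fig, ax = plt.subplots()
    ax.plot(freqs, hv, color='k', label='H/V')
    ax.semilogx()
    ax.set_ylabel('H/V Ratio')

    comp_ax = ax.twinx()   # same pattern as compAxis = ax.twinx() above
    comp_ax.plot(freqs, comp, color='b', alpha=0.5, label='Z')
    comp_ax.set_ylabel('Amplitude [dB]')
    plt.show()

The twinned axis keeps the dB-scaled component amplitudes from flattening the H/V curve, which is why the code below computes separate, padded y-limits for compAxis.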
maxY.append(hvsr_data['psd_values_tavg'][key].max()) + #maxY.append(np.stack(hvsr_data.hvsr_windows_df['Use']['psd_values_'+key])) + minY = min(minY) + maxY = max(maxY) + if maxY > 20: + maxY = max(hvsr_data['hvsr_curve']) * 1.15 + rng = maxY-minY + pad = abs(rng * 0.15) + ylim = [minY-pad, maxY+pad+pad] + compAxis.set_ylabel('COMPONENTS\nAmplitude\n[m2/s4/Hz] [dB]') + compAxis.set_ylim(ylim) + yLoc = min(ylim) - abs(ylim[1]-ylim[0]) * 0.05 + ax.text(x=xlim[0], y=yLoc, s=xlabel, + fontsize='x-small', horizontalalignment='right', verticalalignment='top', + bbox=dict(facecolor='w', edgecolor='none', alpha=0.8, pad=0.1)) + #Modify based on whether there are multiple charts + if plotHVSR: + linalpha = 0.2 + stdalpha = 0.05 + else: + linalpha=1 + stdalpha=0.2 + + #Plot individual components + azsLabeled = False + y={} + psdKeys = list(hvsr_data['psd_values_tavg']) + psdKeys.sort() + for key in psdKeys: + if key.upper() == 'Z': + pltColor = 'k' + elif key.upper() =='E': + pltColor = 'b' + elif key.upper() == 'N': + pltColor = 'r' + else: + pltColor = 'g' + + if key in keyList or key == azimuth: + if hvsr_data.horizontal_method == 'Single Azimuth' and key in ['E', 'N']: + pass + else: + y[key] = hvsr_data['psd_values_tavg'][key][:-1] + # Make sure azimuth only shows up in legend once + if pltColor == 'g': + if azsLabeled: + leglabel = None + else: + leglabel = 'Azimuths' + azsLabeled = True + else: + leglabel = key + + compAxis.plot(x, y[key], c=pltColor, label=leglabel, alpha=linalpha) + if '-s' not in plot_type: + compAxis.fill_between(x, hvsr_data['ppsd_std_vals_m'][key][:-1], hvsr_data['ppsd_std_vals_p'][key][:-1], color=pltColor, alpha=stdalpha) + + if plot_type[0] != 'c': + compAxis.legend(loc=legendLoc2) + else: + ax.legend(loc=legendLoc, ncols = len(psdKeys), + borderaxespad=0.1, columnspacing=1,markerfirst=False, reverse=True, borderpad=0.2) + else: + yLoc = min(ylim) - abs(ylim[1]-ylim[0]) * 0.05 + ax.text(x=xlim[0], y=yLoc, s=xlabel, + fontsize='x-small', horizontalalignment='right', verticalalignment='top', + bbox=dict(facecolor='w', edgecolor='none', alpha=0.8, pad=0.1)) + + bbox = ax.get_window_extent() + bboxStart = bbox.__str__().find('Bbox(',0,50)+5 + bboxStr = bbox.__str__()[bboxStart:].split(',')[:4] + axisbox = [] + for i in bboxStr: + i = i.split('=')[1] + if ')' in i: + i = i[:-1] + axisbox.append(float(i)) + + if kwargs['show_legend']: + ax.legend(loc=legendLoc,bbox_to_anchor=(1.05, 1)) + + __plot_current_fig(save_dir=save_dir, + filename=filename, + fig=fig, ax=ax, + plot_suffix=plotSuff, + user_suffix=save_suffix, + show_plot=show_plot) + + return fig, ax + + + # Private function to help decide when to show, format, and save plots + def __plot_current_fig(save_dir, filename, fig, ax, plot_suffix, user_suffix, show_plot): + """Private function to support plot_hvsr, for plotting and showing plots""" + #plt.gca() + #plt.gcf() + #fig.tight_layout() #May need to uncomment this + + #plt.subplots_adjust(top = 1, bottom = 0, right = 1, left = 0, hspace = 0, wspace = 0) + + if save_dir is not None: + outFile = save_dir+'/'+filename+'_'+plot_suffix+str(datetime.datetime.today().date())+'_'+user_suffix+'.png' + fig.savefig(outFile, bbox_inches='tight', pad_inches=0.2) + if show_plot: + fig.canvas.draw()#.show() + #fig.tight_layout() + #plt.ion() + return + + + # Plot spectrogram, private supporting function for plot_hvsr + def _plot_specgram_hvsr(hvsr_data, fig=None, ax=None, azimuth='HV', save_dir=None, save_suffix='',**kwargs): + """Private function for plotting average spectrogram of
all three channels from ppsds + """ + # Get all input parameters + if fig is None and ax is None: + fig, ax = plt.subplots() + + if 'kwargs' in kwargs.keys(): + kwargs = kwargs['kwargs'] + + if 'spec' in kwargs.keys(): + del kwargs['spec'] + + if 'p' in kwargs.keys(): + peak_plot=True + del kwargs['p'] + else: + peak_plot=False + + if 'ann' in kwargs.keys(): + annotate=True + del kwargs['ann'] + else: + annotate=False + + if 'all' in kwargs.keys(): + show_all_peaks = True + del kwargs['all'] + else: + show_all_peaks = False + + if 'tp' in kwargs.keys(): + show_all_time_peaks = True + del kwargs['tp'] + else: + show_all_time_peaks = False + + if 'grid' in kwargs.keys(): + ax.grid(which=kwargs['grid'], alpha=0.25) + del kwargs['grid'] + + if 'ytype' in kwargs: + if kwargs['ytype']=='freq': + ylabel = 'Frequency [Hz]' + del kwargs['ytype'] + else: + ylabel = 'Period [s]' + del kwargs['ytype'] + else: + ylabel='Frequency [Hz]' + + if 'detrend' in kwargs.keys(): + detrend= kwargs['detrend'] + del kwargs['detrend'] + else: + detrend=True + + if 'colorbar' in kwargs.keys(): + colorbar = kwargs['colorbar'] + del kwargs['colorbar'] + else: + colorbar=True + + if 'cmap' in kwargs.keys(): + pass + else: + kwargs['cmap'] = 'turbo' + + hvsrDF = hvsr_data['hvsr_windows_df'] + used = hvsrDF['Use'].astype(bool) + notused = ~hvsrDF['Use'].astype(bool) + + # Setup + ppsds = hvsr_data['ppsds']#[k]['current_times_used'] + import matplotlib.dates as mdates + anyKey = list(ppsds.keys())[0] + + # Get data + psdArr = np.stack(hvsrDF['HV_Curves'].apply(np.flip)) + useArr = np.array(hvsrDF['Use']) + useArr = np.tile(useArr, (psdArr.shape[1], 1)).astype(int) + useArr = np.clip(useArr, a_min=0.15, a_max=1) + + # Get times + xmin = hvsrDF['TimesProcessed_MPL'].min() + xmax = hvsrDF['TimesProcessed_MPL'].max() + + #Format times + tTicks = mdates.MinuteLocator(byminute=range(0,60,5)) + ax.xaxis.set_major_locator(tTicks) + tTicks_minor = mdates.SecondLocator(bysecond=[0]) + ax.xaxis.set_minor_locator(tTicks_minor) + + tLabels = mdates.DateFormatter('%H:%M') + ax.xaxis.set_major_formatter(tLabels) + ax.tick_params(axis='both', labelsize='x-small') + + #Get day label for bottom of chart + if hvsrDF.index[0].date() != hvsrDF.index[-1].date(): + day = str(hvsr_data['hvsr_windows_df'].index[0].date())+' - '+str(hvsr_data['hvsr_windows_df'].index[-1].date()) + else: + day = str(hvsr_data['hvsr_windows_df'].index[0].date()) + + #Get extents + ymin = hvsr_data['input_params']['hvsr_band'][0] + ymax = hvsr_data['input_params']['hvsr_band'][1] + + freqticks = np.flip(hvsr_data['x_freqs'][anyKey]) + yminind = np.argmin(np.abs(ymin-freqticks)) + ymaxind = np.argmin(np.abs(ymax-freqticks)) + freqticks = freqticks[yminind:ymaxind] + freqticks = np.logspace(np.log10(freqticks[0]), np.log10(freqticks[-1]), num=psdArr.shape[1]) + + extList = [xmin, xmax, ymin, ymax] + #Set up axes + ax.set_facecolor([0,0,0]) #Create black background for transparency to look darker + + # Interpolate into linear + new_indices = np.linspace(freqticks[0], freqticks[-1], len(freqticks)) + linList = [] + for row in psdArr: + row = row.astype(np.float16) + linList.append(np.interp(new_indices, freqticks, row)) + linear_arr = np.stack(linList) + + # Create chart + if 'subplot' in kwargs.keys(): + del kwargs['subplot'] + + # Get min and max of colormap normalization from array that is used + if 'vmin' not in kwargs.keys(): + kwargs['vmin'] = np.min(np.stack(hvsrDF[used]['HV_Curves'])) + if 'vmax' not in kwargs.keys(): + kwargs['vmax'] = 
np.max(np.stack(hvsrDF[used]['HV_Curves'])) + + im = ax.imshow(linear_arr.T, origin='lower', extent=extList, aspect='auto', alpha=useArr, **kwargs) + ax.tick_params(left=True, right=True, top=True) + + if peak_plot: + ax.axhline(hvsr_data['BestPeak'][azimuth]['f0'], c='k', linestyle='dotted', zorder=1000) + + if annotate: + if float(hvsr_data['BestPeak'][azimuth]['f0']) < 1: + boxYPerc = 0.998 + vertAlign = 'top' + else: + boxYPerc = 0.002 + vertAlign = 'bottom' + xLocation = float(xmin) + (float(xmax)-float(xmin))*0.99 + yLocation = hvsr_data['input_params']['hvsr_band'][0] + (hvsr_data['input_params']['hvsr_band'][1]-hvsr_data['input_params']['hvsr_band'][0])*(boxYPerc) + ann = ax.text(x=xLocation, y=yLocation, fontsize='x-small', s=f"Peak at {hvsr_data['BestPeak'][azimuth]['f0']:0.2f} Hz", ha='right', va=vertAlign, + bbox={'alpha':0.8, 'edgecolor':None, 'linewidth':0, 'fc':'w', 'pad':0.3}) + + if show_all_time_peaks: + timeVals = [] + peakFreqs = [] + for tIndex, pFreqs in enumerate(hvsrDF[used]['CurvesPeakFreqs_'+azimuth]): + endWindow = hvsrDF.iloc[tIndex]['TimesProcessed_MPLEnd'] + startWindow = hvsrDF.iloc[tIndex]['TimesProcessed_MPL'] + midTime = (endWindow + startWindow) / 2 + for f in pFreqs: + timeVals.append(midTime) + peakFreqs.append(f) + ax.scatter(timeVals, peakFreqs, marker="^", facecolors='#00000000', edgecolors='#00000088',s=12) + + if show_all_peaks: + ax.hlines(hvsr_data['hvsr_peak_freqs'][azimuth], ax.get_xlim()[0], ax.get_xlim()[1], colors='gray', alpha=0.666, linestyles='dotted', zorder=999) + + xLoc = xmin + (xmax - xmin) * 0.001 + yLoc = ymin + (ymax - ymin) * 0.97 + ax.text(x=xLoc, y=yLoc, s=day, + fontsize='small', horizontalalignment='left', verticalalignment='top', + bbox=dict(facecolor='w', edgecolor=None, linewidth=0, alpha=0.8, pad=0.2)) + + if colorbar: + cbar = plt.colorbar(mappable=im, orientation='horizontal') + cbar.set_label('H/V Ratio') + + #Set x and y labels + yLoc = ymin - (ymin * 2.5e-1) + ax.text(x=xmin, y=yLoc,s="UTC Time", + fontsize='x-small', horizontalalignment='right', verticalalignment='top', + bbox=dict(facecolor='w', edgecolor='none', alpha=0.8, pad=0.1)) + ax.set_ylabel(ylabel, fontsize='x-small') + ax.set_yscale('log') + + #plt.sca(ax) + #plt.rcParams['figure.dpi'] = 500 + #plt.rcParams['figure.figsize'] = (12,4) + fig.canvas.draw() + + return fig, ax + + +# Plot spectrogram from stream +def _plot_specgram_stream(stream, params=None, component='Z', stack_type='linear', detrend='mean', dbscale=True, fill_gaps=None,fig=None, ax=None, cmap_per=[0.1,0.9], ylimstd=5, show_plot=False, return_fig=True, **kwargs): + """Function for plotting spectrogram in a nice matplotlib chart from an obspy.stream + + For more details on main function being called, see https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.specgram.html + + Parameters + ---------- + stream : obspy.core.stream.Stream object + Stream for which to plot spectrogram + params : dict, optional + If dict, will read the hvsr_band from the a dictionary with a key ['hvsr_band'] (like the parameters dictionary). Otherwise, can read in the hvsr_band as a two-item list. Or, if None, defaults to [0.4,40], by default None. + component : str or list, default='Z' + If string, should be one character long component, by default 'Z.' If list, can contain 'E', 'N', 'Z', and will stack them per stack_type and stream.stack() method in obspy to make spectrogram. 
+ stack_type : str, default = 'linear' + Parameter to be read directly into stack_type parameter of Stream.stack() method of obspy streams, by default 'linear'. See https://docs.obspy.org/packages/autogen/obspy.core.stream.Stream.stack.html + Only matters if more than one component used. + detrend : str, default = 'mean' + Parameter to be read directly into detrend parameter of matplotlib.pyplot.specgram, by default 'mean'. See: https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.specgram.html + dbscale : bool, default = True + If True, scale parameter of matplotlib.pyplot.specgram set to 'dB', by default True + return_fig : bool, default = True + Whether to return the figure from the function or just show it, by default True + cmap_per : list, default = [0.1, 0.9] + Two-item list wwith clip limits as percentage of values of colormap, so extremes do not taint colormap, by default [0.1,0.9] + + Returns + ------- + fig + If return_fig is True, matplotlib figure is returned + ax + If return_fig is True, matplotlib axis is returned + """ + og_stream = stream.copy() + + #Get the latest start time and earliest end times of all components + traceList = [] + maxStartTime = obspy.UTCDateTime(-1e10) #Go back pretty far (almost 400 years) to start with + minEndTime = obspy.UTCDateTime(1e10) + for comp in ['E', 'N', 'Z']: + #Get all traces from selected component in comp_st + if isinstance(stream.select(component=comp).merge()[0].data, np.ma.masked_array): + stream = stream.split() + comp_st = stream.select(component=comp).copy() + stream.merge() + if comp in component: + for tr in comp_st: + #Get all traces specified for use in one list + traceList.append(tr) + + if stream[0].stats.starttime > maxStartTime: + maxStartTime = stream[0].stats.starttime + if stream[0].stats.endtime < minEndTime: + minEndTime = stream[0].stats.endtime + + if isinstance(comp_st[0].data, np.ma.masked_array): + comp_st = comp_st.split() + + #Trim all traces to the same start/end time for total + for tr in traceList: + tr.trim(starttime=maxStartTime, endtime=minEndTime) + og_stream.trim(starttime=maxStartTime, endtime=minEndTime) + + #Combine all traces into single, stacked trace/stream + stream = obspy.Stream(traceList) + stream.merge() + + if len(stream)>1: + stream.stack(group_by='all', npts_tol=200, stack_type=stack_type) + + newFig= False + if fig is None and ax is None: + #Organize the chart layout + mosaic = [['spec'], + ['spec'], + ['spec'], + ['spec'], + ['spec'], + ['spec'], + ['signalz'], + ['signalz'], + ['signaln'], + ['signale']] + fig, ax = plt.subplot_mosaic(mosaic, sharex=True, gridspec_kw={'hspace':0.3}) + #fig, ax = plt.subplots(nrows=2, ncols=1, sharex=True) + newFig = True + + data = stream[0].data + if isinstance(data, np.ma.MaskedArray) and fill_gaps is not None: + data = data.filled(fill_gaps) + sample_rate = stream[0].stats.sampling_rate + + if 'cmap' in kwargs.keys(): + cmap=kwargs['cmap'] + else: + cmap='turbo' + + if params is None: + hvsr_band = [0.4, 40] + else: + hvsr_band = params['hvsr_band'] + ymin = hvsr_band[0] + ymax = hvsr_band[1] + + if dbscale: + scale='dB' + else: + scale=None + with warnings.catch_warnings(): + warnings.simplefilter('ignore', category=RuntimeWarning) + spec, freqs, times, im = ax['spec'].specgram(x=data, Fs=sample_rate, detrend=detrend, scale_by_freq=True, scale=scale) + im.remove() + + difference_array = freqs-ymin + for i, d in enumerate(difference_array): + if d > 0: + if i-1 < 0: + i=1 + minfreqInd = i-1 + break + + difference_array = freqs-ymax + for i, d 
in enumerate(difference_array): + if d > 0: + maxfreqInd = i-1 + break + + array_displayed = spec[minfreqInd:maxfreqInd,:] + #freqs_displayed = freqs[minfreqInd:maxfreqInd] + #im.set_data(array_displayed) + vmin = np.nanpercentile(array_displayed, cmap_per[0]*100) + vmax = np.nanpercentile(array_displayed, cmap_per[1]*100) + + + decimation_factor = 10 + + sTime = stream[0].stats.starttime + timeList = {} + mplTimes = {} + + if isinstance(og_stream[0].data, np.ma.masked_array): + og_stream = og_stream.split() + og_stream.decimate(decimation_factor) + og_stream.merge() + + for tr in og_stream: + key = tr.stats.component + timeList[key] = [] + mplTimes[key] = [] + for t in np.ma.getdata(tr.times()): + newt = sTime + t + timeList[key].append(newt) + mplTimes[key].append(newt.matplotlib_date) + + #Ensure that the min and max times for each component are the same + for i, k in enumerate(mplTimes.keys()): + currMin = np.min(list(map(np.min, mplTimes[k]))) + currMax = np.max(list(map(np.max, mplTimes[k]))) + + if i == 0: + xmin = currMin + xmax = currMax + else: + if xmin > currMin: + xmin = currMin + if xmax < currMax: + xmax = currMax + + norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax) + im = ax['spec'].imshow(array_displayed, norm=norm, cmap=cmap, aspect='auto', interpolation=None, extent=[xmin,xmax,ymax,ymin]) + + ax['spec'].set_xlim([xmin, xmax]) + ax['spec'].set_ylim([ymin, ymax]) + ax['spec'].semilogy() + + #cbar = plt.colorbar(mappable=im) + #cbar.set_label('Power Spectral Density [dB]') + #stream.spectrogram(samp_rate=sample_rate, axes=ax, per_lap=0.75, log=True, title=title, cmap='turbo', dbscale=dbscale, show_plot=False) + + ax['spec'].xaxis_date() + ax['signalz'].xaxis_date() + ax['signaln'].xaxis_date() + ax['signale'].xaxis_date() + #tTicks = mdates.MinuteLocator(interval=5) + #ax[0].xaxis.set_major_locator(tTicks) + ax['signale'].xaxis.set_major_locator(mdates.MinuteLocator(byminute=range(0,60,5))) + ax['signale'].xaxis.set_major_formatter(mdates.DateFormatter('%H:%M')) + ax['signale'].xaxis.set_minor_locator(mdates.MinuteLocator(interval=1)) + ax['signale'].tick_params(axis='x', labelsize=8) + + ax['signalz'].plot(mplTimes['Z'],og_stream.select(component='Z')[0].data, color='k', linewidth=0.25) + ax['signaln'].plot(mplTimes['N'],og_stream.select(component='N')[0].data, color='k', linewidth=0.1) + ax['signale'].plot(mplTimes['E'],og_stream.select(component='E')[0].data, color='k', linewidth=0.1) + + ax['spec'].set_ylabel('Spectrogram: {}'.format(component)) + ax['signalz'].set_ylabel('Z') + ax['signaln'].set_ylabel('N') + ax['signale'].set_ylabel('E') + + for comp in mplTimes.keys(): + stD = np.abs(np.nanstd(np.ma.getdata(og_stream.select(component=comp)[0].data))) + dmed = np.nanmedian(np.ma.getdata(og_stream.select(component=comp)[0].data)) + key = 'signal'+comp.lower() + ax[key].set_ylim([dmed-ylimstd*stD, dmed+ylimstd*stD]) + + if params is None: + fig.suptitle('HVSR Site: Spectrogram and Data') + elif 'title' in kwargs.keys(): + fig.suptitle(kwargs['title']) + else: + if 'input_params' in params.keys(): + sitename = params['input_params']['site'] + else: + sitename = params['site'] + fig.suptitle('{}\nSpectrogram and Data'.format(sitename)) + + day = "{}-{}-{}".format(stream[0].stats.starttime.year, stream[0].stats.starttime.month, stream[0].stats.starttime.day) + ax['signale'].set_xlabel('UTC Time \n'+day) + + if newFig: + ogFigsize = matplotlib.rcParams['figure.figsize'] + fig = plt.gcf() + matplotlib.rcParams['figure.figsize'] = (40, 4) + #plt.rcParams['figure.dpi'] 
= 100 + #plt.rcParams['figure.figsize'] = (5,4) + #fig.tight_layout() + plt.rcParams['figure.figsize'] = ogFigsize + + if show_plot: + fig.canvas.draw() + plt.show() + + if return_fig: + return fig + + return + + +# HELPER functions for checking peaks +# Initialize peaks +def __init_peaks(_x, _y, _index_list, _hvsr_band, peak_freq_range=[0.4, 40], _min_peak_amp=1): + """ Initialize peaks. + + Creates dictionary with relevant information and removes peaks in hvsr curve that are not relevant for data analysis (outside HVSR_band) + + Parameters + ---------- + x : list-like obj + List with x-values (frequency or period values) + y : list-like obj + List with hvsr curve values + index_list : list or array_like + List with indices of peaks + _hvsr_band : list + Two-item list with low and high frequency to limit frequency range of data analysis extent + peak_freq_range : list + Two-item list with low and high frequency to limit frequency range for checking for peaks + _min_peak_amp : float + Minimum amplitude to be used for peak selection (to limit number of meaningless peaks found) + + Returns + ------- + _peak : list + List of dictionaries, one for each input peak + """ + + _peak = list() + for _i in _index_list: + if (_hvsr_band[0] <= _x[_i] <= _hvsr_band[1]) and (peak_freq_range[0] <= _x[_i] <= peak_freq_range[1]) and (_y[_i]>_min_peak_amp): + _peak.append({'f0': float(_x[_i]), 'A0': float(_y[_i]), + 'f-': None, 'f+': None, 'Sf': None, 'Sa': None, + 'Score': 0, + 'Report': {'Lw':'', 'Nc':'', 'σ_A(f)':'', 'A(f-)':'', 'A(f+)':'', 'A0': '', 'P+': '', 'P-': '', 'Sf': '', 'Sa': ''}, + 'PassList':{}, + 'PeakPasses':False}) + return _peak + + +# Check reliability of HVSR of curve +def __check_curve_reliability(hvsr_data, _peak, col_id='HV'): + """Tests to check for reliable H/V curve + + Tests include: + 1) Peak frequency is greater than 10 / window length (f0 > 10 / Lw) + f0 = peak frequency [Hz] + Lw = window length [seconds] + 2) Number of significant cycles (Nc) is greater than 200 (Nc(f0) > 200) + Nc = Lw * Nw * f0 + Lw = window length [sec] + Nw = Number of windows used in analysis + f0 = peak frequency [Hz] + 3) StDev of amplitude of H/V curve is less than 2 at all frequencies between 0.5f0 and 2f0 + (less than 3 if f0 is less than 0.5 Hz) + f0 = peak frequency [Hz] + StDev is a measure of the variation of all the H/V curves generated for each time window + Our main H/V curve is the median of these + + Parameters + ---------- + hvsr_data : dict + Dictionary containing all important information generated about HVSR curve + _peak : list + A list of dictionaries, with each dictionary containing information about each peak + + Returns + ------- + _peak : list + List of dictionaries, same as above, except with information about curve reliability tests added + """ + anyKey = list(hvsr_data['ppsds'].keys())[0]#Doesn't matter which channel we use as key + + delta = hvsr_data['ppsds'][anyKey]['delta'] + window_len = (hvsr_data['ppsds'][anyKey]['len'] * delta) #Window length in seconds + window_num = np.array(hvsr_data['psd_raw'][anyKey]).shape[0] + + for _i in range(len(_peak)): + # Test 1 + peakFreq= _peak[_i]['f0'] + test1 = peakFreq > 10/window_len + + nc = window_len * window_num * peakFreq + test2 = nc > 200 + + halfF0 = peakFreq/2 + doublef0 = peakFreq*2 + + + test3 = True + failCount = 0 + for i, freq in enumerate(hvsr_data['x_freqs'][anyKey][:-1]): + if freq >= halfF0 and freq <doublef0: + compVal = 2 + if peakFreq >= 0.5: + if hvsr_data['hvsr_log_std'][col_id][i] >= compVal: + test3=False + 
failCount +=1 + + else: #if peak freq is less than 0.5 + compVal = 3 + if hvsr_data['hvsr_log_std'][col_id][i] >= compVal: + test3=False + failCount +=1 + + if test1: + _peak[_i]['Report']['Lw'] = f'{round(peakFreq,3)} > {10/int(window_len):0.3} (10 / {int(window_len)}) {sprit_utils.check_mark()}' + else: + _peak[_i]['Report']['Lw'] = f'{round(peakFreq,3)} > {10/int(window_len):0.3} (10 / {int(window_len)}) {sprit_utils.x_mark()}' + + if test2: + _peak[_i]['Report']['Nc'] = f'{int(nc)} > 200 {sprit_utils.check_mark()}' + else: + _peak[_i]['Report']['Nc'] = f'{int(nc)} > 200 {sprit_utils.x_mark()}' + + if test3: + _peak[_i]['Report']['σ_A(f)'] = f'H/V Amp. St.Dev. for {peakFreq*0.5:0.3f}-{peakFreq*2:0.3f}Hz < {compVal} {sprit_utils.check_mark()}' + else: + _peak[_i]['Report']['σ_A(f)'] = f'H/V Amp. St.Dev. for {peakFreq*0.5:0.3f}-{peakFreq*2:0.3f}Hz < {compVal} {sprit_utils.x_mark()}' + + _peak[_i]['PassList']['WinLen'] = test1 + _peak[_i]['PassList']['SigCycles'] = test2 + _peak[_i]['PassList']['LowCurveStD'] = test3 + return _peak + + +# Check clarity of peaks +def __check_clarity(_x, _y, _peak, do_rank=True): + """Check clarity of peak amplitude(s) + + Test peaks for satisfying amplitude clarity conditions as outlined by SESAME 2004: + - there exist one frequency f-, lying between f0/4 and f0, such that A0 / A(f-) > 2 + - there exist one frequency f+, lying between f0 and 4*f0, such that A0 / A(f+) > 2 + - A0 > 2 + + Parameters + ---------- + x : list-like obj + List with x-values (frequency or period values) + y : list-like obj + List with hvsr curve values + _peak : list + List with dictionaries for each peak, containing info about that peak + do_rank : bool, default=False + Include Rank in output + + Returns + ------- + _peak : list + List of dictionaries, each containing the clarity test information for the different peaks that were read in + """ + global max_rank + + # Test each _peak for clarity. + if do_rank: + max_rank += 1 + + if np.array(_x).shape[0] == 1000: + jstart = len(_y)-2 + else: + jstart = len(_y)-1 + + + for _i in range(len(_peak)): + #Initialize as False + _peak[_i]['f-'] = sprit_utils.x_mark() + _peak[_i]['Report']['A(f-)'] = f"H/V curve > {_peak[_i]['A0']/2:0.2f} for all {_peak[_i]['f0']/4:0.2f} Hz-{_peak[_i]['f0']:0.3f} Hz {sprit_utils.x_mark()}" + _peak[_i]['PassList']['ProminenceLow'] = False #Start with assumption that it is False until we find an instance where it is True + for _j in range(jstart, -1, -1): + # There exist one frequency f-, lying between f0/4 and f0, such that A0 / A(f-) > 2. + if (float(_peak[_i]['f0']) / 4.0 <= _x[_j] < float(_peak[_i]['f0'])) and float(_peak[_i]['A0']) / _y[_j] > 2.0: + _peak[_i]['Score'] += 1 + _peak[_i]['f-'] = '%10.3f %1s' % (_x[_j], sprit_utils.check_mark()) + _peak[_i]['Report']['A(f-)'] = f"Amp. of H/V Curve @{_x[_j]:0.3f}Hz ({_y[_j]:0.3f}) < {_peak[_i]['A0']/2:0.3f} {sprit_utils.check_mark()}" + _peak[_i]['PassList']['ProminenceLow'] = True + break + else: + pass + + if do_rank: + max_rank += 1 + for _i in range(len(_peak)): + #Initialize as False + _peak[_i]['f+'] = sprit_utils.x_mark() + _peak[_i]['Report']['A(f+)'] = f"H/V curve > {_peak[_i]['A0']/2:0.2f} for all {_peak[_i]['f0']:0.2f} Hz-{_peak[_i]['f0']*4:0.3f} Hz {sprit_utils.x_mark()}" + _peak[_i]['PassList']['ProminenceHi'] = False + for _j in range(len(_x) - 1): + + # There exist one frequency f+, lying between f0 and 4*f0, such that A0 / A(f+) > 2. 
+ if float(_peak[_i]['f0']) * 4.0 >= _x[_j] > float(_peak[_i]['f0']) and \ + float(_peak[_i]['A0']) / _y[_j] > 2.0: + _peak[_i]['Score'] += 1 + _peak[_i]['f+'] = f"{_x[_j]:0.3f} {sprit_utils.check_mark()}" + _peak[_i]['Report']['A(f+)'] = f"H/V Curve at {_x[_j]:0.2f} Hz: {_y[_j]:0.2f} < {_peak[_i]['A0']/2:0.2f} (f0/2) {sprit_utils.check_mark()}" + _peak[_i]['PassList']['ProminenceHi'] = True + break + else: + pass + + # Amplitude Clarity test + # Only peaks with A0 > 2 pass + if do_rank: + max_rank += 1 + _a0 = 2.0 + for _i in range(len(_peak)): + + if float(_peak[_i]['A0']) > _a0: + _peak[_i]['Report']['A0'] = f"Amplitude of peak ({_peak[_i]['A0']:0.2f}) > {int(_a0)} {sprit_utils.check_mark()}" + _peak[_i]['Score'] += 1 + _peak[_i]['PassList']['AmpClarity'] = True + else: + _peak[_i]['Report']['A0'] = '%0.2f > %0.1f %1s' % (_peak[_i]['A0'], _a0, sprit_utils.x_mark()) + _peak[_i]['PassList']['AmpClarity'] = False + + return _peak + + +# Check the stability of the frequency peak +def __check_freq_stability(_peak, _peakm, _peakp): + """Test peaks for satisfying stability conditions + + Test as outlined by SESAME 2004: + - the _peak should appear at the same frequency (within a percentage ± 5%) on the H/V + curves corresponding to mean + and - one standard deviation. + + Parameters + ---------- + _peak : list + List of dictionaries containing input information about peak, without freq stability test + _peakm : list + List of dictionaries containing input information about peakm (peak minus one StDev in freq) + _peakp : list + List of dictionaries containing input information about peak (peak plus one StDev in freq) + + Returns + ------- + _peak : list + List of dictionaries containing output information about peak test + """ + global max_rank + + # check σf and σA + max_rank += 1 + + # First check below + # Initialize list + _found_m = list() + for _i in range(len(_peak)): + _dx = 1000000. + # Initialize test as not passing for this frequency + _found_m.append(False) + _peak[_i]['Report']['P-'] = sprit_utils.x_mark() + # Iterate through all time windows + for _j in range(len(_peakm)): + if abs(_peakm[_j]['f0'] - _peak[_i]['f0']) < _dx: + _index = _j + _dx = abs(_peakm[_j]['f0'] - _peak[_i]['f0']) #_dx is difference between peak frequencies for each time window and main peak + if _peak[_i]['f0'] * 0.95 <= _peakm[_j]['f0'] <= _peak[_i]['f0'] * 1.05: + _peak[_i]['Report']['P-'] = f"{_peakm[_j]['f0']:0.2f} Hz within ±5% of {_peak[_i]['f0']:0.2f} Hz {sprit_utils.check_mark()}" + _found_m[_i] = True + break + if _peak[_i]['Report']['P-'] == sprit_utils.x_mark(): + _peak[_i]['Report']['P-'] = f"{_peakm[_j]['f0']:0.2f} Hz within ±5% of {_peak[_i]['f0']:0.2f} Hz {sprit_utils.x_mark()}" + + # Then Check above + _found_p = list() + for _i in range(len(_peak)): + _dx = 1000000. 
+ _found_p.append(False) + _peak[_i]['Report']['P+'] = sprit_utils.x_mark() + for _j in range(len(_peakp)): + if abs(_peakp[_j]['f0'] - _peak[_i]['f0']) < _dx: + + _dx = abs(_peakp[_j]['f0'] - _peak[_i]['f0']) + if _peak[_i]['f0'] * 0.95 <= _peakp[_j]['f0'] <= _peak[_i]['f0'] * 1.05: + if _found_m[_i]: + _peak[_i]['Report']['P+'] = f"{_peakp[_j]['f0']:0.2f} Hz within ±5% of {_peak[_i]['f0']:0.2f} Hz {sprit_utils.check_mark()}" + _peak[_i]['Score'] += 1 + _peak[_i]['PassList']['FreqStability'] = True + else: + _peak[_i]['Report']['P+'] = f"{_peakp[_j]['f0']:0.2f} Hz within ±5% of {_peak[_i]['f0']:0.2f} Hz {sprit_utils.x_mark()}" + _peak[_i]['PassList']['FreqStability'] = False + break + else: + _peak[_i]['Report']['P+'] = f"{_peakp[_j]['f0']:0.2f} Hz within ±5% of {_peak[_i]['f0']:0.2f} Hz {sprit_utils.x_mark()}" + _peak[_i]['PassList']['FreqStability'] = False + if _peak[_i]['Report']['P+'] == sprit_utils.x_mark() and len(_peakp) > 0: + _peak[_i]['Report']['P+'] = f"{_peakp[_j]['f0']:0.2f} Hz within ±5% of {_peak[_i]['f0']:0.2f} Hz {sprit_utils.x_mark()}" + + return _peak + + +# Check stability +def __check_stability(_stdf, _peak, _hvsr_log_std, rank): + """Test peaks for satisfying stability conditions as outlined by SESAME 2004 + This includes: + - σf lower than a frequency dependent threshold ε(f) + - σA (f0) lower than a frequency dependent threshold θ(f), + + + Parameters + ---------- + _stdf : list + List with dictionaries containint frequency standard deviation for each peak + _peak : list + List of dictionaries containing input information about peak, without freq stability test + _hvsr_log_std : list + List of dictionaries containing log standard deviation along curve + rank : int + Integer value, higher value is "higher-ranked" peak, helps determine which peak is actual hvsr peak + + Returns + ------- + _peak : list + List of dictionaries containing output information about peak test + """ + + global max_rank + + # + # check σf and σA + # + if rank: + max_rank += 2 + for _i in range(len(_peak)): + _peak[_i]['Sf'] = _stdf[_i] + _peak[_i]['Sa'] = _hvsr_log_std[_i] + _this_peak = _peak[_i] + if _this_peak['f0'] < 0.2: + _e = 0.25 + if _stdf[_i] < _e * _this_peak['f0']: + _peak[_i]['Report']['Sf'] = f"St.Dev. of Peak Freq. ({_stdf[_i]:0.2f}) < {(_e * _this_peak['f0']):0.3f} {sprit_utils.check_mark()}" + _this_peak['Score'] += 1 + _this_peak['PassList']['LowStDev_Freq'] = True + else: + _peak[_i]['Report']['Sf'] = f"St.Dev. of Peak Freq. ({_stdf[_i]:0.2f}) < {(_e * _this_peak['f0']):0.3f} {sprit_utils.x_mark()}" + _this_peak['PassList']['LowStDev_Freq'] = False + + _t = 0.48 + if _hvsr_log_std[_i] < _t: + _peak[_i]['Report']['Sa'] = f"St.Dev. of Peak Amp. ({_hvsr_log_std[_i]:0.3f}) < {_t:0.2f} {sprit_utils.check_mark()}" + _this_peak['Score'] += 1 + _this_peak['PassList']['LowStDev_Amp'] = True + else: + _peak[_i]['Report']['Sa'] = f"St.Dev. of Peak Amp. ({_hvsr_log_std[_i]:0.3f}) < {_t:0.2f} {sprit_utils.check_mark()}" + _this_peak['PassList']['LowStDev_Amp'] = False + + elif 0.2 <= _this_peak['f0'] < 0.5: + _e = 0.2 + if _stdf[_i] < _e * _this_peak['f0']: + _peak[_i]['Report']['Sf'] = f"St.Dev. of Peak Freq. ({_stdf[_i]:0.2f}) < {(_e * _this_peak['f0']):0.3f} {sprit_utils.check_mark()}" + _this_peak['Score'] += 1 + _this_peak['PassList']['LowStDev_Freq'] = True + else: + _peak[_i]['Report']['Sf'] = f"St.Dev. of Peak Freq. 
({_stdf[_i]:0.2f}) < {(_e * _this_peak['f0']):0.3f} {sprit_utils.x_mark()}" + _this_peak['PassList']['LowStDev_Freq'] = False + + _t = 0.40 + if _hvsr_log_std[_i] < _t: + _peak[_i]['Report']['Sa'] = f"St.Dev. of Peak Amp. ({_hvsr_log_std[_i]:0.3f}) < {_t:0.2f} {sprit_utils.check_mark()}" + _this_peak['Score'] += 1 + _this_peak['PassList']['LowStDev_Amp'] = True + else: + _peak[_i]['Report']['Sa'] = f"St.Dev. of Peak Amp. ({_hvsr_log_std[_i]:0.3f}) < {_t:0.2f} {sprit_utils.check_mark()}" + _this_peak['PassList']['LowStDev_Amp'] = False + + elif 0.5 <= _this_peak['f0'] < 1.0: + _e = 0.15 + if _stdf[_i] < _e * _this_peak['f0']: + _peak[_i]['Report']['Sf'] = f"St.Dev. of Peak Freq. ({_stdf[_i]:0.2f}) < {(_e * _this_peak['f0']):0.3f} {sprit_utils.check_mark()}" + _this_peak['Score'] += 1 + _this_peak['PassList']['LowStDev_Freq'] = True + else: + _peak[_i]['Report']['Sf'] = f"St.Dev. of Peak Freq. ({_stdf[_i]:0.2f}) < {(_e * _this_peak['f0']):0.3f} {sprit_utils.x_mark()}" + _this_peak['PassList']['LowStDev_Freq'] = False + + _t = 0.3 + if _hvsr_log_std[_i] < _t: + _peak[_i]['Report']['Sa'] = f"St.Dev. of Peak Amp. ({_hvsr_log_std[_i]:0.3f}) < {_t:0.2f} {sprit_utils.check_mark()}" + _this_peak['Score'] += 1 + _this_peak['PassList']['LowStDev_Amp'] = True + else: + _peak[_i]['Report']['Sa'] = f"St.Dev. of Peak Amp. ({_hvsr_log_std[_i]:0.3f}) < {_t:0.2f} {sprit_utils.check_mark()}" + _this_peak['PassList']['LowStDev_Amp'] = False + + elif 1.0 <= _this_peak['f0'] <= 2.0: + _e = 0.1 + if _stdf[_i] < _e * _this_peak['f0']: + _peak[_i]['Report']['Sf'] = f"St.Dev. of Peak Freq. ({_stdf[_i]:0.2f}) < {(_e * _this_peak['f0']):0.3f} {sprit_utils.check_mark()}" + _this_peak['Score'] += 1 + _this_peak['PassList']['LowStDev_Freq'] = True + else: + _peak[_i]['Report']['Sf'] = f"St.Dev. of Peak Freq. ({_stdf[_i]:0.2f}) < {(_e * _this_peak['f0']):0.3f} {sprit_utils.x_mark()}" + _this_peak['PassList']['LowStDev_Freq'] = False + + _t = 0.25 + if _hvsr_log_std[_i] < _t: + _peak[_i]['Report']['Sa'] = f"St.Dev. of Peak Amp. ({_hvsr_log_std[_i]:0.3f}) < {_t:0.2f} {sprit_utils.check_mark()}" + _this_peak['Score'] += 1 + _this_peak['PassList']['LowStDev_Amp'] = True + else: + _peak[_i]['Report']['Sa'] = f"St.Dev. of Peak Amp. ({_hvsr_log_std[_i]:0.3f}) < {_t:0.2f} {sprit_utils.check_mark()}" + _this_peak['PassList']['LowStDev_Amp'] = False + + elif _this_peak['f0'] > 0.2: + _e = 0.05 + if _stdf[_i] < _e * _this_peak['f0']: + _peak[_i]['Report']['Sf'] = f"St.Dev. of Peak Freq. ({_stdf[_i]:0.2f}) < {(_e * _this_peak['f0']):0.3f} {sprit_utils.check_mark()}" + _this_peak['Score'] += 1 + _this_peak['PassList']['LowStDev_Freq'] = True + else: + _peak[_i]['Report']['Sf'] = f"St.Dev. of Peak Freq. ({_stdf[_i]:0.2f}) < {(_e * _this_peak['f0']):0.3f} {sprit_utils.x_mark()}" + _this_peak['PassList']['LowStDev_Freq'] = False + + _t = 0.2 + if _hvsr_log_std[_i] < _t: + _peak[_i]['Report']['Sa'] = f"St.Dev. of Peak Amp. ({_hvsr_log_std[_i]:0.3f}) < {_t:0.2f} {sprit_utils.check_mark()}" + _this_peak['Score'] += 1 + _this_peak['PassList']['LowStDev_Amp'] = True + else: + _peak[_i]['Report']['Sa'] = f"St.Dev. of Peak Amp. 
({_hvsr_log_std[_i]:0.3f}) < {_t:0.2f} {sprit_utils.x_mark()}"
+                _this_peak['PassList']['LowStDev_Amp'] = False
+
+    return _peak
+
+
+# Get frequency standard deviation
+def __get_stdf(x_values, indexList, hvsrPeaks):
+    """Private function to get frequency standard deviation of peak(s) of interest, from multiple time-step HVSR curves
+
+    Parameters
+    ----------
+    x_values : list or np.array
+        Array of x_values of dataset (frequency or period, most often frequency)
+    indexList : list
+        List of index/indices of peak(s) of interest (each index is within the x_values list)
+    hvsrPeaks : pandas.Series
+        Series in which each row contains a list of peak indices for the H/V curve from that time window
+
+    Returns
+    -------
+    stdf : list
+        List of standard deviations, one per peak of interest
+    """
+    stdf = list()
+    # Go through list containing all peak indices (often, just a single index of the main peak)
+    for index in indexList:
+        point = list()
+        # Iterate to get index for all rows of pandas series,
+        # each row contains a list of peak indices for the H/V curve from that time window
+        for j in range(len(hvsrPeaks)):
+            p = None
+
+            # Iterate through each peak in each time window
+            for k in range(len(hvsrPeaks.iloc[j])):
+                if p is None:
+                    p = hvsrPeaks.iloc[j][k]
+                else:
+                    # Find frequency peak closest in the current time window to the (current) hvsr peak
+                    if abs(index - hvsrPeaks.iloc[j][k]) < abs(index - p):
+                        p = hvsrPeaks.iloc[j][k]
+            if p is not None:
+                # It should never be None, this is just a double check
+                # Append the index of interest for that time window
+                point.append(p)
+        # Append the main index itself
+        point.append(index)
+        v = list()
+
+        # Get all the actual frequencies (go through each index and extract the frequency from x_values)
+        for pl in range(len(point)):
+            v.append(x_values[point[pl]])
+
+        # stdf is a list in case there are multiple peaks to check.
+        # Most of the time this is only a 1-item list
+        # Contains std of frequencies of the peaks from each time window H/V curve that are closest to the main H/V peak
+        stdf.append(np.std(v))
+    return stdf
+
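+
+# --- Illustrative sketch (editor's addition, not part of SpRIT's API) -------
+# A toy walk-through of what __get_stdf computes, assuming `np` and `pd` are
+# the numpy/pandas imports used elsewhere in this module; the values below
+# are hypothetical. For the main peak index, take the nearest per-window peak
+# index, add the main index itself, map indices to frequencies, and take the
+# standard deviation:
+#
+#     x_values = np.linspace(0.4, 40, 100)           # frequency axis [Hz]
+#     hvsrPeaks = pd.Series([[49, 51], [50], [48]])  # peak indices per window
+#     __get_stdf(x_values, indexList=[50], hvsrPeaks=hvsrPeaks)
+#     # nearest index per window: 49, 50, 48 (+ the main index 50)
+#     # -> [np.std(x_values[[49, 50, 48, 50]])]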
+ +
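+
+# --- Illustrative sketch (editor's addition, not part of SpRIT's API) -------
+# Condenses the three SESAME curve-reliability criteria checked in
+# __check_curve_reliability above; the function name and arguments are
+# hypothetical, chosen for illustration.
+def _curve_reliability_sketch(f0, window_len_s, n_windows):
+    """Return pass/fail for SESAME reliability tests 1 and 2, plus the sigma_A limit used in test 3."""
+    test1 = f0 > 10.0 / window_len_s        # Test 1: f0 > 10 / window length
+    nc = window_len_s * n_windows * f0      # number of significant cycles
+    test2 = nc > 200                        # Test 2: enough significant cycles
+    sigma_a_limit = 2 if f0 >= 0.5 else 3   # Test 3 limit on sigma_A between 0.5*f0 and 2*f0
+    return test1, test2, sigma_a_limit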
+
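+
+# --- Illustrative sketch (editor's addition, not part of SpRIT's API) -------
+# The three amplitude-clarity conditions from __check_clarity above, written
+# out compactly; names are hypothetical. x/y are the frequency axis and the
+# H/V curve, f0/a0 the candidate peak frequency and amplitude.
+def _clarity_sketch(x, y, f0, a0):
+    prominence_low = any(a < a0 / 2 for f, a in zip(x, y) if f0 / 4 <= f < f0)
+    prominence_hi = any(a < a0 / 2 for f, a in zip(x, y) if f0 < f <= 4 * f0)
+    amp_clarity = a0 > 2.0
+    return prominence_low, prominence_hi, amp_clarity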
+ +
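+
+# --- Illustrative sketch (editor's addition, not part of SpRIT's API) -------
+# The +/-5% frequency-stability window applied in __check_freq_stability
+# above: the peak must reappear within 5% of f0 on both the mean-minus-sigma
+# and mean-plus-sigma curves. Names here are hypothetical.
+def _freq_stability_sketch(f0, f0_on_minus_curve, f0_on_plus_curve):
+    def within(f):
+        return 0.95 * f0 <= f <= 1.05 * f0
+    return within(f0_on_minus_curve) and within(f0_on_plus_curve)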
+
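+
+# --- Reference sketch (editor's addition, not part of SpRIT's API) ----------
+# The frequency-dependent SESAME (2004) thresholds hard-coded branch by
+# branch in __check_stability above, gathered into one table: sigma_f must be
+# below epsilon * f0, and the log-amplitude st.dev. below theta. The variable
+# name is hypothetical; keys are (low, high) f0 bands in Hz.
+_SESAME_STABILITY_THRESHOLDS = {
+    (0.0, 0.2): {'epsilon': 0.25, 'theta_log': 0.48},
+    (0.2, 0.5): {'epsilon': 0.20, 'theta_log': 0.40},
+    (0.5, 1.0): {'epsilon': 0.15, 'theta_log': 0.30},
+    (1.0, 2.0): {'epsilon': 0.10, 'theta_log': 0.25},
+    (2.0, float('inf')): {'epsilon': 0.05, 'theta_log': 0.20},
+}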
+
+
+ + + + \ No newline at end of file diff --git a/docs/_modules/sprit/sprit_jupyter_UI.html b/docs/_modules/sprit/sprit_jupyter_UI.html new file mode 100644 index 00000000..f128dca2 --- /dev/null +++ b/docs/_modules/sprit/sprit_jupyter_UI.html @@ -0,0 +1,2393 @@ + + + + + + sprit.sprit_jupyter_UI — sprit 1.4 documentation + + + + + + + + + + + + + + + +

Source code for sprit.sprit_jupyter_UI

+"""Functions to create jupyter notebook widget UI
+"""
+
+import datetime
+import inspect
+import os
+import pathlib
+import tkinter as tk
+from tkinter import filedialog
+import webbrowser
+
+from zoneinfo import available_timezones
+
+import ipywidgets as widgets
+from IPython.display import display, clear_output
+import numpy as np
+import pandas as pd
+import plotly.express as px
+import plotly.graph_objs as go
+import plotly.subplots as subplots
+from scipy import signal
+
+try:  # For distribution
+    from sprit import sprit_utils
+    from sprit import sprit_hvsr
+except ImportError:  # For local testing
+    import sprit_hvsr
+    import sprit_utils
+
+global hvsr_data
+    
+OBSPY_FORMATS =  ['AH', 'ALSEP_PSE', 'ALSEP_WTH', 'ALSEP_WTN', 'CSS', 'DMX', 'GCF', 'GSE1', 'GSE2', 'KINEMETRICS_EVT', 'KNET', 'MSEED', 'NNSA_KB_CORE', 'PDAS', 'PICKLE', 'Q', 'REFTEK130', 'RG16', 'SAC', 'SACXY', 'SEG2', 'SEGY', 'SEISAN', 'SH_ASC', 'SLIST', 'SU', 'TSPAIR', 'WAV', 'WIN', 'Y']
+
+
+def get_default(func, param):
+    """Return the default value of parameter `param` from the signature of `func`."""
+    return inspect.signature(func).parameters[param].default
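+
+# Usage sketch (editor's addition): get_default pulls a parameter's default
+# straight from a function signature; the widgets below use it to seed their
+# initial values, e.g. get_default(sprit_hvsr.input_params, 'network').
+# With a hypothetical function:
+#
+#     def _demo(timeout=30): ...
+#     get_default(_demo, 'timeout')  # -> 30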
+ + +
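+
+# --- Minimal sketch (editor's addition, not part of SpRIT's API) ------------
+# The recurring pattern inside create_jupyter_ui below: a value widget plus a
+# button wired to an on_click callback, laid out in an HBox. Names here are
+# hypothetical; `widgets` and `display` are the imports at the top of this
+# module.
+def _widget_pattern_sketch():
+    site_box = widgets.Text(description='Site Name:', value='HVSR_Site')
+    run_button = widgets.Button(description='Run', button_style='success')
+
+    def _on_run(button):  # ipywidgets passes the clicked button to the callback
+        print(f"Processing site {site_box.value}")
+
+    run_button.on_click(_on_run)
+    display(widgets.HBox([site_box, run_button]))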
+[docs] +def create_jupyter_ui(): + global hvsr_data + + ui_width = 20 + ui_height= 12 + global results_fig + global log_textArea + log_textArea = widgets.Textarea(value="SESSION LOG", disabled=True, layout={'height': '300px','width': '99%', 'overflow': 'scroll'}) + + # INPUT TAB + # Create a VBox for the accordions + input_HBox = widgets.HBox() + input_accordion_label_box = widgets.VBox() + input_accordion_box = widgets.VBox() + input_accordion = widgets.Accordion() + + # Metadata accordion + metadata_grid = widgets.GridspecLayout(7, 10) + network_textbox = widgets.Text(description='Network:', + placeholder=get_default(sprit_hvsr.input_params, 'network'), + value=get_default(sprit_hvsr.input_params, 'network'), + tooltip="input_params(network)") + + station_textbox = widgets.Text(description='Station:', + placeholder=get_default(sprit_hvsr.input_params, 'station'), + value=get_default(sprit_hvsr.input_params, 'station')) + + location_textbox = widgets.Text(description='Location:', + placeholder=get_default(sprit_hvsr.input_params, 'loc'), + value=get_default(sprit_hvsr.input_params, 'loc')) + + z_channel_textbox = widgets.Text(description='Z Channel:', + placeholder=get_default(sprit_hvsr.input_params, 'channels')[0], + value=get_default(sprit_hvsr.input_params, 'channels')[0]) + + e_channel_textbox = widgets.Text(description='E Channel:', + placeholder=get_default(sprit_hvsr.input_params, 'channels')[2], + value=get_default(sprit_hvsr.input_params, 'channels')[2]) + + n_channel_textbox = widgets.Text(description='N Channel:', + placeholder=get_default(sprit_hvsr.input_params, 'channels')[1], + value=get_default(sprit_hvsr.input_params, 'channels')[1]) + + + # Instrument Settings + inst_settings_text = widgets.Text(placeholder='Instrument Settings Filepath', layout=widgets.Layout(width='55%')) + instrument_read_button = widgets.Button(icon='fa-file-import',button_style='success', + layout=widgets.Layout(width='4%')) + instrument_settings_button = widgets.Button(description='Select .inst file', + layout=widgets.Layout(width='10%')) + inst_settings_hbox = widgets.HBox([inst_settings_text,instrument_read_button, instrument_settings_button]) + + def select_inst(event): + try: + if event.description == 'Select .inst file': + root = tk.Tk() + root.wm_attributes('-topmost', True) + root.withdraw() + inst_files = filedialog.askopenfilenames(defaultextension='.inst', filetypes=[('Inst', '.inst')], + title="Select Instrument Settings File") + if isinstance(inst_files, tuple): + pass + else: + inst_files = tuple(inst_files) + root.destroy() + else: + inst_files = tuple([inst_settings_text.value]) + + for i, inst_f in enumerate(inst_files): + inst_settings_text.value = pathlib.Path(inst_f).as_posix() + inst_settings = sprit_hvsr.import_settings(settings_import_path=pathlib.Path(inst_f).as_posix(), settings_import_type='instrument') + + # Go through all items and add them + if 'instrument' in inst_settings.keys(): + if inst_settings['instrument'] not in instrument_dropdown.options: + instrument_dropdown.options.append(inst_settings['instrument']) + instrument_dropdown.value = inst_settings['instrument'] + + if 'net' in inst_settings.keys(): + network_textbox.value = inst_settings['net'] + + if 'sta' in inst_settings.keys(): + station_textbox.value = inst_settings['sta'] + + if 'loc' in inst_settings.keys(): + location_textbox.value = inst_settings['loc'] + + if 'cha' in inst_settings.keys(): + for c in inst_settings['cha']: + if c.lower()[2]=='z': + z_channel_textbox.value = c + if c.lower()[2]=='e': 
+ e_channel_textbox.value = c + if c.lower()[2] =='n': + n_channel_textbox.value = c + + if 'metapath' in inst_settings.keys(): + metadata_filepath.value = inst_settings['metapath'] + + if 'hvsr_band' in inst_settings.keys(): + hvsr_band_min_box.value = inst_settings['hvsr_band'][0] + hvsr_band_max_box.value = inst_settings['hvsr_band'][1] + + except Exception as e: + print(e) + instrument_settings_button.disabled=True + instrument_settings_button.description='Use Text Field' + + instrument_settings_button.on_click(select_inst) + instrument_read_button.on_click(select_inst) + + metadata_grid[0,:] = inst_settings_hbox + metadata_grid[1,0] = network_textbox + metadata_grid[2,0] = station_textbox + metadata_grid[3,0] = location_textbox + metadata_grid[4,0] = z_channel_textbox + metadata_grid[5,0] = e_channel_textbox + metadata_grid[6,0] = n_channel_textbox + + # Acquisition Accordion + instrument_grid = widgets.GridspecLayout(5, 10) + # Date Picker labelled "Acquisition Date" + acquisition_date_picker = widgets.DatePicker(description='Acq.Date:', + placeholder=datetime.datetime.today().date(), + value=datetime.datetime.today().date()) + + # Label that shows the Date currently selected in the Date Picker + acquisition_doy = widgets.IntText(description='DOY', + placeholder=f"{acquisition_date_picker.value.timetuple().tm_yday}", + value=f"{acquisition_date_picker.value.timetuple().tm_yday}", + layout=widgets.Layout(width='auto')) + + def on_acq_date_change(change): + acquisition_doy.value = acquisition_date_picker.value.timetuple().tm_yday + acquisition_date_picker.observe(on_acq_date_change) + + def on_doy_change(change): + curr_year = datetime.datetime.today().year + if acquisition_doy.value > datetime.datetime.today().timetuple().tm_yday: + curr_year -= 1 + acquisition_date_picker.value = (datetime.datetime(curr_year, 1, 1) + datetime.timedelta(days = acquisition_doy.value-1)).date() + acquisition_doy.observe(on_doy_change) + + # Time selector (hour and minute) labelled "Start Time". + try: + start_time_picker = widgets.TimePicker(description='Start Time:', + placeholder=datetime.time(0,0,0), + value=datetime.time(0,0,0), + layout=widgets.Layout(width='auto')) + except Exception as e: + start_time_picker = widgets.Text(description='Start Time:', + placeholder='00:00', + value='00:00', + layout=widgets.Layout(width='auto')) + + # Time selector (hour and minute) labelled "End Time". Same as Start Time otherwise. 
+ try: + end_time_picker = widgets.TimePicker(description='End Time:', + placeholder=datetime.time(23,59), + value=datetime.time(23,59), + layout=widgets.Layout(width='auto')) + except Exception as e: + end_time_picker = widgets.Text(description='End Time:', + placeholder='23:59:59.999999', + value='23:59:59.999999', + layout=widgets.Layout(width='auto')) + + tzlist = list(available_timezones()) + tzlist.sort() + tzlist.remove('UTC') + tzlist.remove('US/Central') + tzlist.insert(0, 'US/Central') + tzlist.insert(0, 'UTC') + # A dropdown list with all the items from zoneinfo.available_timezones(), default 'UTC' + time_zone_dropdown = widgets.Dropdown(options=tzlist,value=get_default(sprit_hvsr.input_params, 'tzone'), + description='Time Zone:',layout=widgets.Layout(width='fill')) + + instrument_grid[0,0] = acquisition_date_picker + instrument_grid[0,1] = acquisition_doy + instrument_grid[1,0] = start_time_picker + instrument_grid[2,0] = end_time_picker + instrument_grid[3,0] = time_zone_dropdown + + # LOCATION ACCORDION + location_grid = widgets.GridspecLayout(4, 10) + # X coordinate input + xcoord_textbox = widgets.FloatText(description='X Coordinate:', tooltip='xcoord', + value=get_default(sprit_hvsr.input_params, 'xcoord'), + placeholder=get_default(sprit_hvsr.input_params, 'xcoord'), + layout=widgets.Layout(width='auto')) + location_grid[0, 0] = xcoord_textbox + + # Y coordinate input + ycoord_textbox = widgets.FloatText(description='Y Coordinate', tooltip='ycoord:', + value=get_default(sprit_hvsr.input_params, 'ycoord'), + placeholder=get_default(sprit_hvsr.input_params, 'ycoord'), + layout=widgets.Layout(width='auto')) + location_grid[1, 0] = ycoord_textbox + + # Z coordinate input + zcoord_textbox = widgets.FloatText(description='Z Coordinate', tooltip='elevation:', + value=get_default(sprit_hvsr.input_params, 'elevation'), + placeholder=get_default(sprit_hvsr.input_params, 'elevation'), + layout=widgets.Layout(width='auto')) + location_grid[2, 0] = zcoord_textbox + + # Z coordinate unit input + elevation_unit_textbox = widgets.Dropdown(options=[('Feet', 'feet'), ('Meters', 'meters')], + value=get_default(sprit_hvsr.input_params, 'elev_unit'), + description='Z Unit:', tooltip='elev_unit', + layout=widgets.Layout(width='auto')) + location_grid[2, 1] = elevation_unit_textbox + + # Input CRS input + input_crs_textbox = widgets.Text(description='Input CRS:', + layout=widgets.Layout(width='auto'), + placholder=get_default(sprit_hvsr.input_params, 'input_crs'), + value=get_default(sprit_hvsr.input_params, 'input_crs')) + location_grid[3, 0] = input_crs_textbox + + # Output CRS input + output_crs_textbox = widgets.Text(description='Output CRS:', + layout=widgets.Layout(width='auto'), + placholder=get_default(sprit_hvsr.input_params, 'output_crs'), + value=get_default(sprit_hvsr.input_params, 'output_crs')) + location_grid[3, 1] = output_crs_textbox + + # IO PARAMS ACCORDION + ioparam_grid = widgets.GridspecLayout(6, 10) + + # Data format (for obspy format to use to read in) + data_format_dropdown = widgets.Dropdown( + options=OBSPY_FORMATS, + value='MSEED', + description='Data Formats:', layout=widgets.Layout(width='auto')) + + hvsr_band_min_box = widgets.FloatText(description='HVSR Band [Hz]', style={'description_width': 'initial'}, + placeholder=get_default(sprit_hvsr.input_params, 'hvsr_band')[0], + value=get_default(sprit_hvsr.input_params, 'hvsr_band')[0]) + hvsr_band_max_box = widgets.FloatText(placeholder=get_default(sprit_hvsr.input_params, 'hvsr_band')[1], + 
value=get_default(sprit_hvsr.input_params, 'hvsr_band')[1]) + hvsr_band_hbox = widgets.HBox([hvsr_band_min_box, hvsr_band_max_box],layout=widgets.Layout(width='auto')) + + + peak_freq_range_min_box = widgets.FloatText(description='Peak Range [Hz]',placeholder=get_default(sprit_hvsr.input_params, 'peak_freq_range')[0], + value=get_default(sprit_hvsr.input_params, 'peak_freq_range')[0], + style={'description_width': 'initial'}, layout=widgets.Layout(width='auto')) + peak_freq_range_max_box = widgets.FloatText(placeholder=get_default(sprit_hvsr.input_params, 'peak_freq_range')[1], + value=get_default(sprit_hvsr.input_params, 'peak_freq_range')[1],layout=widgets.Layout(width='auto')) + peak_freq_range_hbox = widgets.HBox([peak_freq_range_min_box, peak_freq_range_max_box],layout=widgets.Layout(width='auto')) + + + # A dropdown labeled "Detrend type" with "Spline", "Polynomial", or "None" + detrend_type_dropdown = widgets.Dropdown(options=[('Spline', 'spline'), ('Polynomial', 'polynomial'), ('None', 'none')], + description='Detrend Type:', layout=widgets.Layout(width='auto')) + detrend_order = widgets.FloatText(description='Order:', tooltip='detrend_order', placeholder=get_default(sprit_hvsr.fetch_data, 'detrend_order'), + value=get_default(sprit_hvsr.fetch_data, 'detrend_order'),layout=widgets.Layout(width='auto')) + + # A text to specify the trim directory + trim_directory = widgets.Text(description='Trim Dir.:', value="None",#pathlib.Path().home().as_posix(), + layout=widgets.Layout(width='auto')) + trim_export_dropdown = widgets.Dropdown( + options=OBSPY_FORMATS, + value='MSEED', + description='Trim Format:', layout=widgets.Layout(width='auto')) + trim_directory_upload = widgets.FileUpload( + accept='', + multiple=False, layout=widgets.Layout(width='auto')) + + # Processing Settings + proc_settings_text = widgets.Text(placeholder='Instrument Settings Filepath', layout=widgets.Layout(width='55%')) + proc_settings_read_button = widgets.Button(icon='fa-file-import',button_style='success', + layout=widgets.Layout(width='4%')) + proc_settings_browse_button = widgets.Button(description='Select .proc file', + layout=widgets.Layout(width='10%')) + proc_settings_hbox = widgets.HBox([proc_settings_text, proc_settings_read_button, proc_settings_browse_button]) + + excluded_params = ['hvsr_data', 'params', 'hvsr_results'] + funcList = [sprit_hvsr.fetch_data, sprit_hvsr.remove_noise, + sprit_hvsr.generate_ppsds, sprit_hvsr.process_hvsr, + sprit_hvsr.remove_outlier_curves, sprit_hvsr.check_peaks, + sprit_hvsr.get_report] + + def select_proc(event): + try: + if event.description == 'Select .proc file': + root = tk.Tk() + root.wm_attributes('-topmost', True) + root.withdraw() + proc_files = filedialog.askopenfilenames(defaultextension='.proc', filetypes=[('PROC', '.proc')], + title="Select Processing Settings File") + if isinstance(proc_files, tuple): + pass + else: + proc_files = tuple(proc_files) + root.destroy() + else: + proc_files = tuple([proc_settings_text.value]) + + for i, proc_f in enumerate(proc_files): + proc_settings_text.value = pathlib.Path(proc_f).as_posix() + proc_settings = sprit_hvsr.import_settings(settings_import_path=pathlib.Path(proc_f).as_posix(), settings_import_type='processing') + + for func, params in proc_settings.items(): + if func in widget_param_dict.keys(): + for prm, val in params.items(): + if prm in widget_param_dict[func].keys(): + #print(prm, ':', widget_param_dict[func][prm],' | ', val) + if val is None or val=='None': + val='none' + if prm == 'export_format': + val = 
val.upper() + if prm == 'smooth': + if val is True: + val = 51 + if prm == 'resample': + if val is True: + val = 1000 + if isinstance(widget_param_dict[func][prm], list): + for i, item in enumerate(widget_param_dict[func][prm]): + item.value = val[i] + else: + widget_param_dict[func][prm].value = val + except Exception as e: + print(e) + proc_settings_browse_button.disabled=True + proc_settings_browse_button.description='Use Text Field' + + proc_settings_read_button.on_click(select_proc) + proc_settings_browse_button.on_click(select_proc) + + ioparam_grid[0,:] = proc_settings_hbox + ioparam_grid[1,0] = data_format_dropdown + ioparam_grid[2,:5] = hvsr_band_hbox + ioparam_grid[3,:5] = peak_freq_range_hbox + ioparam_grid[4,:1] = detrend_type_dropdown + ioparam_grid[4,1] = detrend_order + ioparam_grid[5,:6] = trim_directory + ioparam_grid[5, 6:8] = trim_export_dropdown + ioparam_grid[5, 8] = trim_directory_upload + + # PYTHON API ACCORDION + inputAPI_grid = widgets.GridspecLayout(2, 10) + # A text label with "input_params()" + input_params_prefix = widgets.HTML(value='<style>p {word-wrap: break-word}</style> <p>' + 'input_params' + '</p>', + layout=widgets.Layout(width='fill', justify_content='flex-end',align_content='flex-start')) + input_params_call = widgets.HTML(value='<style>p {word-wrap: break-word}</style> <p>' + '()' + '</p>', + layout=widgets.Layout(width='fill', justify_content='flex-start',align_content='flex-start'),) + #input_params_call = widgets.Label(value='input_params()', layout=widgets.Layout(width='auto')) + inputAPI_grid[0, 0] = input_params_prefix + inputAPI_grid[0, 1:] = input_params_call + + # A text label with "fetch_data()" + fetch_data_prefix = widgets.HTML(value='<style>p {word-wrap: break-word}</style> <p>' + 'fetch_data' + '</p>', + layout=widgets.Layout(width='fill', justify_content='flex-end',align_content='flex-start')) + fetch_data_call = widgets.HTML(value='<style>p {word-wrap: break-word}</style> <p>' + '()' + '</p>', + layout=widgets.Layout(width='fill', justify_content='flex-start',align_content='flex-start'),) + inputAPI_grid[1, 0] = fetch_data_prefix + inputAPI_grid[1, 1:] = fetch_data_call + + # Set it all in place + metaLabel = widgets.Label('Instrument', layout=widgets.Layout(height='20%', align_content='center', justify_content='flex-end')) + instLabel = widgets.Label('Acquisition', layout=widgets.Layout(height='20%', align_content='center', justify_content='flex-end')) + locLabel = widgets.Label('Location', layout=widgets.Layout(height='20%', align_content='center', justify_content='flex-end')) + ioparmLabel = widgets.Label('IO/Params', layout=widgets.Layout(height='20%', align_content='center', justify_content='flex-end')) + apiLabel = widgets.Label('API Call', layout=widgets.Layout(height='20%', align_content='center', justify_content='flex-end')) + input_accordion_label_box.children = [metaLabel, instLabel, locLabel, ioparmLabel, apiLabel] + input_accordion_label_box.layout = widgets.Layout(align_content='space-between', width='5%') + + input_accordion.children = [metadata_grid, instrument_grid, location_grid, ioparam_grid, inputAPI_grid] + input_accordion.titles = ["Instrument Metadata", "Acquisition Information", "Location Information", "I/O and Parameters", "See Python API Call"] + input_accordion_box.layout = widgets.Layout(align_content='space-between', width='99%') + + input_accordion.layout = widgets.Layout(width='99%') + + # ADD THE REST OF THE WIDGETS AROUND THE ACCORDIONS + # A text box for the site name + site_name = 
widgets.Text(description='Site Name:', + value='HVSR_Site', + placeholder='HVSR_Site', + style={'description_width': 'initial'}, layout=widgets.Layout(width='30%')) + + tenpct_spacer = widgets.Button(description='', layout=widgets.Layout(width='20%', visibility='hidden')) + + # Dropdown with different source types + data_source_type = widgets.Dropdown(options=[('File', 'file'), ('Raw', 'raw'), ('Batch', 'batch'), ('Directory', 'dir')], + description='Data Source type:', + value='file',orientation='horizontal', + style={'description_width': 'initial'}, + layout=widgets.Layout(width='20%')) + def on_ds_change(event): + if data_source_type.value == 'file' or data_source_type.value== 'batch': + browse_data_button.description = 'Select Files' + else: + browse_data_button.description = 'Select Folders' + data_source_type.observe(on_ds_change) + # Dropdown labeled "Instrument" with options "Raspberry Shake", "Tromino", "Other" + instrument_dropdown = widgets.Dropdown(options=['Raspberry Shake', 'Tromino', 'Other'], + style={'description_width': 'initial'}, + description='Instrument:',layout=widgets.Layout(width='20%')) + + # Processing Settings + processing_settings_button = widgets.FileUpload(accept='.proc', description='Processing Settings', + multiple=False,layout=widgets.Layout(width='10%')) + + # Whether to show plots outside of widget + show_plot_check = widgets.Checkbox(description='Print Plots', value=False, disabled=False, indent=False, + layout=widgets.Layout(width='10%', justify_content='flex-end')) + + + # Whether to print to terminal + verbose_check = widgets.Checkbox(description='Verbose', value=False, disabled=False, indent=False, + layout=widgets.Layout(width='10%', justify_content='flex-end')) + + # A text box labeled Data Filepath + data_filepath = widgets.Text(description='Data Filepath:', + placeholder='sample', value='sample', + style={'description_width': 'initial'},layout=widgets.Layout(width='70%')) + + # A button next to it labeled "Browse" + browse_data_button = widgets.Button(description='Select Files', layout=widgets.Layout(width='10%')) + def select_datapath(event): + try: + root = tk.Tk() + root.wm_attributes('-topmost', True) + root.withdraw() + if data_source_type.value=='file' or data_source_type.value=='batch': + data_filepath.value = str(filedialog.askopenfilenames(defaultextension='.MSEED', title='Select Data File')) + else: + data_filepath.value = str(filedialog.askdirectory(mustexist=True, title="Select Data Directory")) + root.destroy() + except Exception as e: + print(e) + browse_data_button.disabled=True + browse_data_button.description='Use Text Field' + browse_data_button.on_click(select_datapath) + + # A text box labeled Metadata Filepath + metadata_filepath = widgets.Text(description='Metadata Filepath:', + style={'description_width': 'initial'},layout=widgets.Layout(width='70%')) + + # A button next to it labeled "Browse" + browse_metadata_button = widgets.Button(description='Select File(s)', layout=widgets.Layout(width='10%')) + def select_metapath(event): + try: + root = tk.Tk() + root.wm_attributes('-topmost', True) + root.withdraw() + metadata_filepath.value = str(filedialog.askopenfilenames(title='Select Metadata File(s)')) + root.destroy() + except Exception as e: + print(e) + browse_metadata_button.disabled=True + browse_metadata_button.description='Use Text Field' + browse_metadata_button.on_click(select_metapath) + + # A progress bar + progress_bar = widgets.FloatProgress(value=0.0,min=0.0,max=1.0, + bar_style='info', + 
orientation='horizontal',layout=widgets.Layout(width='85%')) + + # A dark yellow button labeled "Read Data" + read_data_button = widgets.Button(description='Read Data', + button_style='warning',layout=widgets.Layout(width='10%')) + + + # A forest green button labeled "Process HVSR" + process_hvsr_button = widgets.Button(description='Run', + button_style='success',layout=widgets.Layout(width='5%')) + + # Update input_param call + def update_input_param_call(): + input_param_text = f"""(input_data='{data_filepath.value}', metapath='{metadata_filepath.value}', site='{site_name.value}', network='{network_textbox.value}', + station='{station_textbox.value}', loc='{location_textbox.value}', channels={[z_channel_textbox.value, e_channel_textbox.value, n_channel_textbox.value]}, + acq_date='{acquisition_date_picker.value}', starttime='{start_time_picker.value}', endtime='{end_time_picker.value}', tzone='{time_zone_dropdown.value}', + xcoord={xcoord_textbox.value}, ycoord={ycoord_textbox.value}, elevation={zcoord_textbox.value}, depth=0 + input_crs='{input_crs_textbox.value}', output_crs='{output_crs_textbox.value}', elev_unit='{elevation_unit_textbox.value}', + instrument='{instrument_dropdown.value}', hvsr_band={[hvsr_band_min_box.value, hvsr_band_max_box.value]}, + peak_freq_range={[peak_freq_range_min_box.value, peak_freq_range_max_box.value]}, verbose={verbose_check.value})""" + input_params_call.value='<style>p {word-wrap: break-word}</style> <p>' + input_param_text + '</p>' + update_input_param_call() + + # Update fetch_data call + def update_fetch_data_call(): + fetch_data_text = f"""(params=hvsr_data, source={data_source_type.value}, trim_dir={trim_directory.value}, + export_format={trim_export_dropdown.value}, detrend={detrend_type_dropdown.value}, detrend_order={detrend_order.value}, verbose={verbose_check.value})""" + fetch_data_call.value='<style>p {word-wrap: break-word}</style> <p>' + fetch_data_text + '</p>' + update_fetch_data_call() + + site_hbox = widgets.HBox() + site_hbox.children = [site_name, tenpct_spacer, tenpct_spacer, tenpct_spacer, tenpct_spacer, tenpct_spacer, show_plot_check, verbose_check] + datapath_hbox = widgets.HBox() + datapath_hbox.children = [data_filepath, browse_data_button, data_source_type] + metadata_hbox = widgets.HBox() + metadata_hbox.children = [metadata_filepath, browse_metadata_button, instrument_dropdown] + progress_hbox = widgets.HBox() + progress_hbox.children = [progress_bar, read_data_button, process_hvsr_button] + + input_params_vbox = widgets.VBox() + input_params_vbox.children = [site_hbox,datapath_hbox,metadata_hbox,progress_hbox] + + input_accordion_box.children = [input_accordion] + #input_HBox.children = [input_accordion_label_box, input_accordion_box] + #input_HBox.layout= widgets.Layout(align_content='space-between') + + # Create a GridBox with 12 rows and 20 columns + input_tab = widgets.GridBox(layout=widgets.Layout(grid_template_columns='repeat(10, 1)', + grid_template_rows='repeat(12, 1)')) + + # Add the VBox to the GridBox + input_tab.children = [site_hbox, + datapath_hbox, + metadata_hbox, + input_accordion_box, + progress_hbox] + + def get_input_params(): + input_params_kwargs={ + 'input_data':data_filepath.value, + 'metapath':metadata_filepath.value, + 'site':site_name.value, + 'instrument':instrument_dropdown.value, + 'network':network_textbox.value, 'station':station_textbox.value, 'loc':location_textbox.value, + 'channels':[z_channel_textbox.value, e_channel_textbox.value, n_channel_textbox.value], + 
'starttime':start_time_picker.value, + 'endtime':end_time_picker.value, + 'tzone':time_zone_dropdown.value, + 'xcoord':xcoord_textbox.value, + 'ycoord':ycoord_textbox.value, + 'elevation':zcoord_textbox.value, 'elev_unit':elevation_unit_textbox.value,'depth':0, + 'input_crs':input_crs_textbox.value,'output_crs':output_crs_textbox.value, + 'hvsr_band':[hvsr_band_min_box.value, hvsr_band_max_box.value], + 'peak_freq_range':[peak_freq_range_min_box.value, peak_freq_range_max_box.value]} + return input_params_kwargs + + def get_fetch_data_params(): + fetch_data_kwargs = { + 'source':data_source_type.value, + 'trim_dir':trim_directory.value, + 'export_format':data_format_dropdown.value, + 'detrend':detrend_type_dropdown.value, + 'detrend_order':detrend_order.value} + if str(fetch_data_kwargs['detrend']).lower() == 'none': + fetch_data_kwargs['detrend'] = None + + if str(fetch_data_kwargs['trim_dir']).lower() == 'none': + fetch_data_kwargs['trim_dir'] = None + return fetch_data_kwargs + + def read_data(button): + progress_bar.value = 0 + log_textArea.value += f"\n\nREADING DATA [{datetime.datetime.now()}]" + + ip_kwargs = get_input_params() + hvsr_data = sprit_hvsr.input_params(**ip_kwargs, verbose=verbose_check.value) + log_textArea.value += f"\n\n{datetime.datetime.now()}\ninput_params():\n'{ip_kwargs}" + if button.description=='Read Data': + progress_bar.value=0.333 + else: + progress_bar.value=0.1 + fd_kwargs = get_fetch_data_params() + hvsr_data = sprit_hvsr.fetch_data(hvsr_data, **fd_kwargs, verbose=verbose_check.value) + log_textArea.value += '\n\n'+str(datetime.datetime.now())+'\nfetch_data():\n\t'+str(fd_kwargs) + if button.description=='Read Data': + progress_bar.value=0.666 + else: + progress_bar.value=0.2 + + use_hv_curve_rmse.value=False + use_hv_curve_rmse.disabled=True + + update_preview_fig(hvsr_data, preview_fig) + + if button.description=='Read Data': + sprit_tabs.selected_index = 1 + progress_bar.value=0 + return hvsr_data + + read_data_button.on_click(read_data) + + def get_remove_noise_kwargs(): + def get_remove_method(): + remove_method_list=[] + do_stalta = stalta_check.value + do_sat_pct = max_saturation_check.value + do_noiseWin=noisy_windows_check.value + do_warmcool=warmcool_check.value + + if auto_remove_check.value: + remove_method_list=['stalta', 'saturation', 'noise', 'warmcool'] + else: + if do_stalta: + remove_method_list.append('stalta') + if do_sat_pct: + remove_method_list.append('saturation') + if do_noiseWin: + remove_method_list.append('noise') + if do_warmcool: + remove_method_list.append('warmcool') + + if not remove_method_list: + remove_method_list = None + return remove_method_list + + remove_noise_kwargs = {'remove_method':get_remove_method(), + 'sat_percent':max_saturation_pct.value, + 'noise_percent':max_window_pct.value, + 'sta':sta.value, + 'lta':lta.value, + 'stalta_thresh':[stalta_thresh_low.value, stalta_thresh_hi.value], + 'warmup_time':warmup_time.value, + 'cooldown_time':cooldown_time.value, + 'min_win_size':noisy_window_length.value, + 'remove_raw_noise':raw_data_remove_check.value, + 'verbose':verbose_check.value} + return remove_noise_kwargs + + def get_generate_ppsd_kwargs(): + ppsd_kwargs = { + 'skip_on_gaps':skip_on_gaps.value, + 'db_bins':[db_bins_min.value, db_bins_max.value, db_bins_step.value], + 'ppsd_length':ppsd_length.value, + 'overlap':overlap_pct.value, + 'special_handling':special_handling_dropdown.value, + 'period_smoothing_width_octaves':period_smoothing_width.value, + 'period_step_octaves':period_step_octave.value, + 
'period_limits':[period_limits_min.value, period_limits_max.value], + 'verbose':verbose_check.value + } + + if str(ppsd_kwargs['special_handling']).lower() == 'none': + ppsd_kwargs['special_handling'] = None + return ppsd_kwargs + + def get_remove_outlier_curve_kwargs(): + roc_kwargs = { + 'use_percentile':rmse_pctile_check.value, + 'rmse_thresh':rmse_thresh.value, + 'use_hv_curve':False, + 'verbose':verbose_check.value + } + return roc_kwargs + + def get_process_hvsr_kwargs(): + if smooth_hv_curve_bool.value: + smooth_value = smooth_hv_curve.value + else: + smooth_value = smooth_hv_curve_bool.value + + if resample_hv_curve_bool.value: + resample_value = resample_hv_curve.value + else: + resample_value = resample_hv_curve_bool.value + + ph_kwargs={'method':h_combine_meth.value, + 'smooth':smooth_value, + 'freq_smooth':freq_smoothing.value, + 'f_smooth_width':freq_smooth_width.value, + 'resample':resample_value, + 'outlier_curve_rmse_percentile':use_hv_curve_rmse.value, + 'verbose':verbose_check.value} + return ph_kwargs + + def get_check_peaks_kwargs(): + cp_kwargs = {'hvsr_band':[hvsr_band_min_box.value, hvsr_band_max_box.value], + 'peak_freq_range':[peak_freq_range_min_box.value, peak_freq_range_max_box.value], + 'peak_selection':peak_selection_type.value, + 'verbose':verbose_check.value} + return cp_kwargs + + def get_get_report_kwargs(): + def get_formatted_plot_str(): + # Initialize plot string + hvsr_plot_str = '' + comp_plot_str = '' + spec_plot_str = '' + + # Whether to use each plot + if use_plot_hv.value: + hvsr_plot_str=hvsr_plot_str + "HVSR" + if use_plot_comp.value: + comp_plot_str=comp_plot_str + "C" + if use_plot_spec.value: + spec_plot_str=spec_plot_str + "SPEC" + + # Whether components should be on the same plot as the HV curve + if not combine_hv_comp.value: + comp_plot_str=comp_plot_str + "+" + else: + comp_plot_str=comp_plot_str.replace('+','') + + # Whether to show (log) standard deviations + if not show_std_hv.value: + hvsr_plot_str=hvsr_plot_str + " -s" + if not show_std_comp.value: + comp_plot_str=comp_plot_str + " -s" + + # Whether to show all peaks + if show_all_peaks_hv.value: + hvsr_plot_str=hvsr_plot_str + " all" + + # Whether curves from each time window are shown + if show_all_curves_hv.value: + hvsr_plot_str=hvsr_plot_str + " t" + if show_all_curves_comp.value: + comp_plot_str=comp_plot_str + " t" + + # Whether the best peak is displayed + if show_best_peak_hv.value: + hvsr_plot_str=hvsr_plot_str + " p" + if show_best_peak_comp.value: + comp_plot_str=comp_plot_str + " p" + if show_best_peak_spec.value: + spec_plot_str=spec_plot_str + " p" + + # Whether best peak value is annotated + if ann_best_peak_hv.value: + hvsr_plot_str=hvsr_plot_str + " ann" + if ann_best_peak_comp.value: + comp_plot_str=comp_plot_str + " ann" + if ann_best_peak_spec.value: + spec_plot_str=spec_plot_str + " ann" + + # Whether peaks from individual time windows are shown + if show_ind_peaks_hv.value: + hvsr_plot_str=hvsr_plot_str + " tp" + if show_ind_peaks_spec.value: + spec_plot_str=spec_plot_str + ' tp' + + # Whether to show legend + if show_legend_hv.value: + hvsr_plot_str=hvsr_plot_str + " leg" + if show_legend_comp.value: + comp_plot_str=comp_plot_str + " leg" + if show_legend_spec.value: + spec_plot_str=spec_plot_str + " leg" + + # Combine string into one + plot_str = hvsr_plot_str + ' ' + comp_plot_str + ' ' + spec_plot_str + return plot_str + + gr_kwargs = {'report_format':['print','csv'], + 'plot_type':get_formatted_plot_str(), + 'export_path':None, + 'csv_overwrite_opt':'overwrite', +
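+                     # 'plot_type' above holds the space-separated plot string assembled by
+                     # get_formatted_plot_str(); with the plot-settings checkboxes at their
+                     # defaults it comes out as 'HVSR p ann C+ p SPEC ann'
+                     # (one token group per subplot: H/V curve, components, spectrogram).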
'no_output':False, + 'verbose':verbose_check.value + } + return gr_kwargs + + def process_data(button): + startProc=datetime.datetime.now() + progress_bar.value = 0 + log_textArea.value += f"\n\nPROCESSING DATA [{startProc}]" + global hvsr_data + # Read data again only if internal hvsr_data input_data variable is different from what is in the gui + if not 'hvsr_data' in globals() or not hasattr(hvsr_data, 'input_data') or \ + (pathlib.Path(hvsr_data.input_data).as_posix() != pathlib.Path(data_filepath.value).as_posix()): + hvsr_data = read_data(button) + + remove_noise_kwargs = get_remove_noise_kwargs() + hvsr_data = sprit_hvsr.remove_noise(hvsr_data, **remove_noise_kwargs) + log_textArea.value += f"\n\n{datetime.datetime.now()}\nremove_noise()\n\t{remove_noise_kwargs}" + progress_bar.value = 0.3 + + generate_ppsd_kwargs = get_generate_ppsd_kwargs() + hvsr_data = sprit_hvsr.generate_ppsds(hvsr_data, **generate_ppsd_kwargs) + progress_bar.value = 0.5 + log_textArea.value += f"\n\n{datetime.datetime.now()}\ngenerate_ppsds()\n\t{generate_ppsd_kwargs}" + + + # If this was started by clicking "Generate PPSDs", stop here + if button.description == 'Generate PPSDs': + return + + ph_kwargs = get_process_hvsr_kwargs() + hvsr_data = sprit_hvsr.process_hvsr(hvsr_data, **ph_kwargs) + log_textArea.value += f"\n\n{datetime.datetime.now()}\nprocess_hvsr()\n\t{ph_kwargs}" + progress_bar.value = 0.75 + update_outlier_fig() + + roc_kwargs = get_remove_outlier_curve_kwargs() + hvsr_data = sprit_hvsr.remove_outlier_curves(hvsr_data, **roc_kwargs) + log_textArea.value += f"\n\n{datetime.datetime.now()}\nremove_outlier_curves()\n\t{roc_kwargs}" + progress_bar.value = 0.85 + outlier_fig, hvsr_data = update_outlier_fig() + + use_hv_curve_rmse.value=False + use_hv_curve_rmse.disabled=False + + def get_rmse_range(): + minRMSE = 10000 + maxRMSE = -1 + if roc_kwargs['use_hv_curve']: + colnames = ['HV_Curves'] + else: + colnames = ['psd_values_Z', + 'psd_values_E', + 'psd_values_N'] + dataList = [] + for col in colnames: + dataArr = np.stack(hvsr_data.hvsr_windows_df[col]) + medCurveArr = np.nanmedian(dataArr, axis=0) + rmse = np.sqrt(((np.subtract(dataArr, medCurveArr)**2).sum(axis=1))/dataArr.shape[1]) + if rmse.min() < minRMSE: + minRMSE = rmse.min() + if rmse.max() > maxRMSE: + maxRMSE = rmse.max() + rmse_thresh_slider.min = minRMSE + rmse_thresh_slider.max = maxRMSE + rmse_thresh_slider.step = round((maxRMSE-minRMSE)/100, 2) + rmse_thresh_slider.value = maxRMSE + get_rmse_range() + + cp_kwargs = get_check_peaks_kwargs() + hvsr_data = sprit_hvsr.check_peaks(hvsr_data, **cp_kwargs) + log_textArea.value += f"\n\n{datetime.datetime.now()}\ncheck_peaks()\n\t{cp_kwargs}" + progress_bar.value = 0.9 + + gr_kwargs = get_get_report_kwargs() + hvsr_data = sprit_hvsr.get_report(hvsr_data, **gr_kwargs) + log_textArea.value += f"\n\n{datetime.datetime.now()}\nget_report()\n\t{gr_kwargs}\n\n" + hvsr_data.get_report(report_format='print') # Just in case print wasn't included + log_textArea.value += hvsr_data['Print_Report'] + printed_results_textArea.value = hvsr_data['Print_Report'] + hvsr_data.get_report(report_format='csv') + results_table.value = hvsr_data['CSV_Report'].to_html() + + log_textArea.value += f'Processing time: {datetime.datetime.now() - startProc}' + progress_bar.value = 0.95 + + update_results_fig(hvsr_data, gr_kwargs['plot_type']) + + progress_bar.value = 1 + global hvsr_results + hvsr_results = hvsr_data + return hvsr_results + + def parse_plot_string(plot_string): + plot_list = plot_string.split() + + 
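+        # Illustrative walk-through (assuming a string built by get_formatted_plot_str()):
+        #   'HVSR p ann C+ p SPEC ann'.split() -> ['HVSR', 'p', 'ann', 'C+', 'p', 'SPEC', 'ann']
+        # The loop below records the indices of the 'HVSR', 'C'/'C+' and 'SPEC' markers,
+        # then slices the token list into [hvsr_plot_list, comp_plot_list, spec_plot_list]:
+        # here [0:3], [3:5] and [5:].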
hvsrList = ['hvsr', 'hv', 'h'] + compList = ['component', 'comp', 'c'] + compPlus = [item + '+' for item in compList] + specList = ['spectrogram', 'specgram', 'spec','sg', 's'] + + hvInd = np.nan + compInd = np.nan + specInd = np.nan + + hvIndFound = False + compIndFound = False + specIndFound = False + + for i, item in enumerate(plot_list): + if item.lower() in hvsrList and not hvIndFound: + # assign the index + hvInd = i + hvIndFound = True + if (item.lower() in compList or item.lower() in compPlus) and not compIndFound: + # assign the index + compInd = i + compIndFound = True + if item.lower() in specList and not specIndFound: + # assign the index + specInd = i + specIndFound = True + + # Get individual plot lists (should already be correctly ordered) + if hvInd is np.nan: + hvsr_plot_list = ['HVSR'] + + if compInd is np.nan: + comp_plot_list = [] + if specInd is np.nan: + if hvInd is not np.nan: + hvsr_plot_list = plot_list + spec_plot_list = [] + else: + if hvInd is not np.nan: + hvsr_plot_list = plot_list[hvInd:specInd] + spec_plot_list = plot_list[specInd:] + else: + if hvInd is not np.nan: + hvsr_plot_list = plot_list[hvInd:compInd] + + if specInd is np.nan: + comp_plot_list = plot_list[compInd:] + spec_plot_list = [] + else: + comp_plot_list = plot_list[compInd:specInd] + spec_plot_list = plot_list[specInd:] + + # Figure out how many subplots there will be + plot_list_list = [hvsr_plot_list, comp_plot_list, spec_plot_list] + + return plot_list_list + + def parse_hv_plot_list(hv_data, hvsr_plot_list, azimuth='HV'): + hvsr_data = hv_data + x_data = hvsr_data.x_freqs['Z'] + hvsrDF = hvsr_data.hvsr_windows_df + if azimuth == 'HV': + HVCol = 'HV_Curves' + else: + HVCol = 'HV_Curves_'+azimuth + + if 'tp' in hvsr_plot_list: + allpeaks = [] + for row in hvsrDF[hvsrDF['Use']]['CurvesPeakFreqs_'+azimuth].values: + for peak in row: + allpeaks.append(peak) + allInd = [] + for row, peakList in enumerate(hvsrDF[hvsrDF['Use']]['CurvesPeakIndices_'+azimuth].values): + for ind in peakList: + allInd.append((row, ind)) + x_vals = [] + y_vals = [] + y_max = np.nanmax(hvsr_data.hvsrp[azimuth]) + hvCurveInd = list(hvsrDF.columns).index(HVCol) + + for i, tp in enumerate(allpeaks): + x_vals.extend([tp, tp, None]) # add two x values and a None + y_vals.extend([0, hvsrDF.iloc[allInd[i][0], hvCurveInd][allInd[i][1]], None]) # add the first and last y values and a None + + results_fig.add_trace(go.Scatter(x=x_vals, y=y_vals, mode='lines', + line=dict(width=4, dash="solid", + color="rgba(128,0,0,0.1)"), + name='Best Peaks Over Time'), + row=1, col=1) + + if 't' in hvsr_plot_list: + alltimecurves = np.stack(hvsrDF[hvsrDF['Use']][HVCol]) + for i, row in enumerate(alltimecurves): + if i==0: + showLeg = True + else: + showLeg= False + results_fig.add_trace(go.Scatter(x=x_data[:-1], y=row, mode='lines', + line=dict(width=0.5, dash="solid", + color="rgba(100, 110, 100, 0.8)"), + showlegend=showLeg, + name='Ind. time win. 
curve', + hoverinfo='none'), + row=1, col=1) + + if 'all' in hvsr_plot_list: + for i, p in enumerate(hvsr_data['hvsr_peak_freqs'][azimuth]): + if i==0: + showLeg = True + else: + showLeg= False + + results_fig.add_trace(go.Scatter( + x=[p, p, None], # set x to None + y=[0, np.nanmax(np.stack(hvsrDF[HVCol])),None], # set y to None + mode="lines", # set mode to lines + line=dict(width=1, dash="dot", color="gray"), # set line properties + name="All checked peaks", # set legend name + showlegend=showLeg), + row=1, col=1) + + if '-s' not in hvsr_plot_list: + # Show standard deviation + results_fig.add_trace(go.Scatter(x=x_data, y=hvsr_data.hvsrp2[azimuth], + line={'color':'black', 'width':0.1},marker=None, + showlegend=False, name='Log. St.Dev. Upper', + hoverinfo='none'), + row=1, col=1) + + results_fig.add_trace(go.Scatter(x=x_data, y=hvsr_data.hvsrm2[azimuth], + line={'color':'black', 'width':0.1},marker=None, + fill='tonexty', fillcolor="rgba(128, 128, 128, 0.6)", + name='Log. St.Dev.', hoverinfo='none'), + row=1, col=1) + + if 'p' in hvsr_plot_list: + results_fig.add_trace(go.Scatter( + x=[hvsr_data['BestPeak'][azimuth]['f0'], hvsr_data['BestPeak'][azimuth]['f0'], None], # set x to None + y=[0,np.nanmax(np.stack(hvsrDF['HV_Curves'])),None], # set y to None + mode="lines", # set mode to lines + line=dict(width=1, dash="dash", color="black"), # set line properties + name="Best Peak"), + row=1, col=1) + + if 'ann' in hvsr_plot_list: + # Annotate best peak + results_fig.add_annotation(x=np.log10(hvsr_data['BestPeak'][azimuth]['f0']), + y=0, yanchor='bottom', xanchor='center', + text=f"{hvsr_data['BestPeak'][azimuth]['f0']:.3f} Hz", + bgcolor='rgba(255, 255, 255, 0.7)', + showarrow=False, + row=1, col=1) + return results_fig + + def parse_comp_plot_list(hv_data, comp_plot_list, azimuth='HV'): + + hvsr_data = hv_data + # Initial setup + x_data = hvsr_data.x_freqs['Z'] + hvsrDF = hvsr_data.hvsr_windows_df + same_plot = ((comp_plot_list != []) and ('+' not in comp_plot_list[0])) + + if same_plot: + yaxis_to_use = 'y2' + use_secondary = True + transparency_modifier = 0.5 + else: + yaxis_to_use = 'y' + use_secondary=False + transparency_modifier = 1 + + alpha = 0.4 * transparency_modifier + components = ['Z', 'E', 'N'] + + compColor_semi_light = {'Z':f'rgba(128,128,128,{alpha})', + 'E':f'rgba(0,0,128,{alpha})', + 'N':f'rgba(128,0,0,{alpha})'} + + alpha = 0.7 * transparency_modifier + compColor_semi = {'Z':f'rgba(128,128,128,{alpha})', + 'E':f'rgba(100,100,128,{alpha})', + 'N':f'rgba(128,100,100,{alpha})'} + + compColor = {'Z':f'rgba(128,128,128,{alpha})', + 'E':f'rgba(100,100,250,{alpha})', + 'N':f'rgba(250,100,100,{alpha})'} + + for az in hvsr_data.hvsr_az.keys(): + components.append(az) + compColor_semi_light[az] = f'rgba(0,128,0,{alpha})' + compColor_semi[az] = f'rgba(100,128,100,{alpha})' + compColor[az] = f'rgba(100,250,100,{alpha})' + + # Whether to plot in new subplot or not + if comp_plot_list != [] and '+' in comp_plot_list[0]: + compRow=2 + else: + compRow=1 + + # Whether to plot individual time curves + if 't' in comp_plot_list: + for comp in components: + alltimecurves = np.stack(hvsrDF[hvsrDF['Use']]['psd_values_'+comp]) + for i, row in enumerate(alltimecurves): + if i==0: + showLeg = True + else: + showLeg= False + + results_fig.add_trace(go.Scatter(x=x_data[:-1], y=row, mode='lines', + line=dict(width=0.5, dash="solid", + color=compColor_semi[comp]), + name='Ind. time win. 
curve', + showlegend=False, + hoverinfo='none', + yaxis=yaxis_to_use), + secondary_y=use_secondary, + row=compRow, col=1) + + # Code to plot standard deviation windows, if not removed + if '-s' not in comp_plot_list: + for comp in components: + # Show standard deviation + results_fig.add_trace(go.Scatter(x=x_data, y=hvsr_data.ppsd_std_vals_p[comp], + line={'color':compColor_semi_light[comp], 'width':0.1},marker=None, + showlegend=False, name='Log. St.Dev. Upper', + hoverinfo='none', + yaxis=yaxis_to_use), + secondary_y=use_secondary, + row=compRow, col=1) + + results_fig.add_trace(go.Scatter(x=x_data, y=hvsr_data.ppsd_std_vals_m[comp], + line={'color':compColor_semi_light[comp], 'width':0.1},marker=None, + fill='tonexty', fillcolor=compColor_semi_light[comp], + name=f'St.Dev. [{comp}]', hoverinfo='none', showlegend=False, + yaxis=yaxis_to_use), + secondary_y=use_secondary, + row=compRow, col=1) + + # Code to plot location of best peak + if 'p' in comp_plot_list: + minVal = 10000 + maxVal = -10000 + for comp in components: + currPPSDCurve = hvsr_data['psd_values_tavg'][comp] + if np.nanmin(currPPSDCurve) < minVal: + minVal = np.nanmin(currPPSDCurve) + if np.nanmax(currPPSDCurve) > maxVal: + maxVal = np.nanmax(currPPSDCurve) + + results_fig.add_trace(go.Scatter( + x=[hvsr_data['BestPeak'][azimuth]['f0'], hvsr_data['BestPeak'][azimuth]['f0'], None], # set x to None + y=[minVal,maxVal,None], # set y to None + mode="lines", # set mode to lines + line=dict(width=1, dash="dash", color="black"), # set line properties + name="Best Peak", + yaxis=yaxis_to_use), + secondary_y=use_secondary, + row=compRow, col=1) + + # Code to annotate value of best peak + if 'ann' in comp_plot_list: + minVal = 10000 + for comp in components: + currPPSDCurve = hvsr_data['psd_values_tavg'][comp] + if np.nanmin(currPPSDCurve) < minVal: + minVal = np.nanmin(currPPSDCurve) + results_fig.add_annotation(x=np.log10(hvsr_data['BestPeak'][azimuth]['f0']), + y=minVal, + text=f"{hvsr_data['BestPeak'][azimuth]['f0']:.3f} Hz", + bgcolor='rgba(255, 255, 255, 0.7)', + showarrow=False, + yref=yaxis_to_use, + secondary_y=use_secondary, + row=compRow, col=1) + + # Plot the main averaged component PPSDs + for comp in components: + results_fig.add_trace(go.Scatter(x=hvsr_data.x_freqs[comp], + y=hvsr_data['psd_values_tavg'][comp], + line=dict(width=2, dash="solid", + color=compColor[comp]),marker=None, + name='PPSD Curve '+comp, + yaxis=yaxis_to_use), + secondary_y=use_secondary, + row=compRow, col='all') + + # If new subplot, update accordingly + if compRow==2: + results_fig.update_xaxes(type='log', + range=[np.log10(hvsr_data['hvsr_band'][0]), np.log10(hvsr_data['hvsr_band'][1])], + row=compRow, col=1) + return results_fig + + def parse_spec_plot_list(hv_data, spec_plot_list, subplot_num, azimuth='HV'): + hvsr_data = hv_data + if azimuth == 'HV': + HVCol = 'HV_Curves' + else: + HVCol = 'HV_Curves_'+azimuth + + # Initial setup + hvsrDF = hvsr_data.hvsr_windows_df + specAxisTimes = np.array([dt.isoformat() for dt in hvsrDF.index.to_pydatetime()]) + y_data = hvsr_data.x_freqs['Z'][1:] + image_data = np.stack(hvsrDF[HVCol]).T + + maxZ = np.percentile(image_data, 100) + minZ = np.percentile(image_data, 0) + + use_mask = hvsr_data.hvsr_windows_df.Use.values + use_mask = np.tile(use_mask, (image_data.shape[0],1)) + use_mask = np.where(use_mask == False, np.nan, use_mask) # elementwise test; 'is False' would compare the array object itself and never mask anything + + hmap = go.Heatmap(z=image_data, + x=specAxisTimes, + y=y_data, + colorscale='Turbo', + showlegend=False, + #opacity=0.7, + hovertemplate='Time [UTC]: %{x}<br>Frequency
[Hz]: %{y:.2f}<br>H/V Amplitude: %{z:.2f}<extra></extra>', + zmin=minZ,zmax=maxZ, showscale=False, name='HV Curve Amp. over Time') + results_fig.add_trace(hmap, row=subplot_num, col=1) + + data_used = go.Heatmap( + x=specAxisTimes, + y=y_data, + z=use_mask, + showlegend=False, + colorscale=[[0, 'rgba(0,0,0,0.66)'], [0.25, 'rgba(0,0,0,0.66)'], [1, 'rgba(250,250,250,0)']], + showscale=False, name='Used') + results_fig.add_trace(data_used, row=subplot_num, col=1) + + + # tp currently is not being added to spec_plot_list + if 'tp' in spec_plot_list: + yvals = [] + for row in hvsrDF[HVCol].values: + maxInd = np.argmax(row) + yvals.append(y_data[maxInd]) + tp_trace = go.Scatter(x=specAxisTimes, y=yvals, mode='markers', + line=None, marker=dict(color='white', size=2, line=dict(color='black', width=0.1)), name='Individual H/V Peaks') + results_fig.add_trace(tp_trace, row=subplot_num, col='all') + + if 'p' in spec_plot_list: + results_fig.add_hline(y=hvsr_data['BestPeak'][azimuth]['f0'], line_width=1, line_dash='dash', line_color='black', row=subplot_num, col='all') + + if 'ann' in spec_plot_list: + results_fig.add_annotation(x=specAxisTimes[-1], + y=hvsr_data['hvsr_band'][1], + text=f"Peak: {hvsr_data['BestPeak'][azimuth]['f0']:.3f} Hz", + bgcolor='rgba(255, 255, 255, 0.7)', + showarrow=False, xanchor='right', yanchor='top', + row=subplot_num, col='all') + + if 'leg' in spec_plot_list: + pass + + results_fig.update_yaxes(type='log', + range=[np.log10(hvsr_data['hvsr_band'][0]), np.log10(hvsr_data['hvsr_band'][1])], + row=subplot_num, col=1) + + results_fig.add_annotation( + text=f"{hvsrDF['Use'].astype(bool).sum()}/{hvsrDF.shape[0]} windows used", + x=max(specAxisTimes), + y=np.log10(min(y_data))+(np.log10(max(y_data))-np.log10(min(y_data)))*0.01, + xanchor="right", yanchor="bottom",bgcolor='rgba(256,256,256,0.7)', + showarrow=False,row=subplot_num, col=1) + + return results_fig + + def update_results_fig(hv_data, plot_string): + global results_fig + global results_subp + hvsr_data = hv_data + + if isinstance(hvsr_data, sprit_hvsr.HVSRBatch): + hvsr_data=hvsr_data[0] + + hvsrDF = hvsr_data.hvsr_windows_df + + plot_list = parse_plot_string(plot_string) + + combinedComp=False + noSubplots = 3 - plot_list.count([]) + if plot_list[1] != [] and '+' not in plot_list[1][0]: + combinedComp = True + noSubplots -= 1 + + # Get all data for each plotted item + # COMP Plot + # Figure out which subplot is which + if combinedComp: + comp_plot_row = 1 + spec_plot_row = 2 + else: + comp_plot_row = 2 + spec_plot_row = 3 + + # Re-initialize results_fig + results_fig.data = [] + results_fig.update_layout(grid=None) # Clear the existing grid layout + if not combinedComp: + results_subp = subplots.make_subplots(rows=3, cols=1, horizontal_spacing=0.01, vertical_spacing=0.07, + row_heights=[2, 1.5, 1]) + else: + results_subp = subplots.make_subplots(rows=2, cols=1, horizontal_spacing=0.01, vertical_spacing=0.07, + specs =[[{'secondary_y': True}], + [{'secondary_y': False}]], + row_heights=[1, 1]) + results_fig.update_layout(grid={'rows': noSubplots}) + #del results_fig + results_fig = go.FigureWidget(results_subp) + + results_fig = parse_comp_plot_list(hvsr_data, comp_plot_list=plot_list[1]) + + # HVSR Plot (plot this after COMP so it is on top COMP and to prevent deletion with no C+) + results_fig = parse_hv_plot_list(hvsr_data, hvsr_plot_list=plot_list[0]) + # Will always plot the HV Curve + results_fig.add_trace(go.Scatter(x=hvsr_data.x_freqs['Z'],y=hvsr_data.hvsr_curve, + line={'color':'black', 
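+        # Layout recap (see the make_subplots calls above): with a separate component plot
+        # ('C+') the figure stacks three rows (H/V, components, spectrogram); with a
+        # combined 'C' the components share row 1 on a secondary y-axis and the
+        # spectrogram moves up to row 2.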
'width':1.5},marker=None, name='HVSR Curve'), + row=1, col='all') + + # SPEC plot + results_fig = parse_spec_plot_list(hvsr_data, spec_plot_list=plot_list[2], subplot_num=spec_plot_row) + + # Final figure updating + showtickLabels = (plot_list[1]==[] or '+' not in plot_list[1][0]) + if showtickLabels: + side='bottom' + else: + side='top' + results_fig.update_xaxes(type='log', + range=[np.log10(hvsr_data['hvsr_band'][0]), np.log10(hvsr_data['hvsr_band'][1])], + side='top', + row=1, col=1) + + results_fig.update_xaxes(type='log',overlaying='x', + range=[np.log10(hvsr_data['hvsr_band'][0]), np.log10(hvsr_data['hvsr_band'][1])], + side='bottom', + row=1, col=1) + if comp_plot_row!=1: + results_fig.update_xaxes(showticklabels=showtickLabels, row=comp_plot_row, col=1) + + if preview_fig.layout.width is None: + if outlier_fig.layout.width is None: + chartwidth = 800 + else: + chartwidth = outlier_fig.layout.width + + else: + chartwidth = preview_fig.layout.width + + results_fig.update_layout(margin={"l":10, "r":10, "t":35, 'b':0}, + showlegend=False, autosize=True, height = 1.2 * float(chartwidth), + title=f"{hvsr_data['site']} Results") + results_fig.update_yaxes(title_text='H/V Ratio', row=1, col=1) + results_fig.update_yaxes(title_text='H/V Over Time', row=noSubplots, col=1) + if comp_plot_row==1: + results_fig.update_yaxes(title_text="PPSD Amp\n[m2/s4/Hz][dB]", secondary_y=True, row=comp_plot_row, col=1) + else: + results_fig.update_yaxes(title_text="PPSD Amp\n[m2/s4/Hz][dB]", row=comp_plot_row, col=1) + + # Reset results_graph_widget and display + with results_graph_widget: + clear_output(wait=True) + display(results_fig) + + if show_plot_check.value: + results_fig.show() + + + sprit_tabs.selected_index = 4 + log_textArea.value += f"\n\n{datetime.datetime.now()}\nResults Figure Updated: {plot_string}" + + process_hvsr_button.on_click(process_data) + + # PREVIEW TAB + #Initialize plot + preview_subp = subplots.make_subplots(rows=4, cols=1, shared_xaxes=True, horizontal_spacing=0.01, vertical_spacing=0.01, row_heights=[3,1,1,1]) + preview_fig = go.FigureWidget(preview_subp) + + def update_preview_fig(hv_data, preview_fig): + preview_fig.data = [] + + hvsr_data = hv_data + if isinstance(hvsr_data, sprit_hvsr.HVSRBatch): + hvsr_data=hvsr_data[0] + + stream_z = hvsr_data['stream'].select(component='Z') #may be np.ma.masked_array + stream_e = hvsr_data['stream'].select(component='E') #may be np.ma.masked_array + stream_n = hvsr_data['stream'].select(component='N') #may be np.ma.masked_array + + # Get iso_times and datetime.datetime + utcdt = stream_z[0].times(type='utcdatetime') + iso_times=[] + dt_times = [] + for t in utcdt: + if t is not np.ma.masked: + iso_times.append(t.isoformat()) + dt_times.append(datetime.datetime.fromisoformat(t.isoformat())) + else: + iso_times.append(np.nan) + iso_times=np.array(iso_times) + dt_times = np.array (dt_times) + + # Generate spectrogram + f, t, Sxx = signal.spectrogram(x=stream_z[0].data, fs=stream_z[0].stats.sampling_rate, mode='magnitude') + + # Get times for the axis (one time per window) + axisTimes = [] + for tpass in t: + axisTimes.append((dt_times[0]+datetime.timedelta(seconds=tpass)).isoformat()) + + # Add data to preview_fig + # Add spectrogram of Z component + minz = np.percentile(Sxx, 1) + maxz = np.percentile(Sxx, 99) + hmap = go.Heatmap(z=Sxx, + x=axisTimes, + y=f, + colorscale='Turbo', + showlegend=False, + hovertemplate='Time [UTC]: %{x}<br>Frequency [Hz]: %{y:.2f}<br>Spectrogram Magnitude: %{z:.2f}<extra></extra>', + zmin=minz, zmax=maxz, 
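+        # Color limits use the 1st/99th percentile of spectrogram magnitudes rather than
+        # the min/max, so a handful of extreme samples can't wash out the colormap, e.g.:
+        #   >>> np.percentile([0, 1, 2, 3, 1000], 99)  # ~960.1, vs. max() == 1000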
showscale=False, name='Z Component Spectrogram') + preview_fig.add_trace(hmap, row=1, col=1) + preview_fig.update_yaxes(type='log', range=[np.log10(hvsr_data['hvsr_band'][0]), np.log10(hvsr_data['hvsr_band'][1])], row=1, col=1) + preview_fig.update_yaxes(title={'text':'Spectrogram (Z)'}, row=1, col=1) + + # Add raw traces + dec_factor=5 #This just makes the plotting go faster, by "decimating" the data + preview_fig.add_trace(go.Scatter(x=iso_times[::dec_factor], y=stream_z[0].data[::dec_factor], + line={'color':'black', 'width':0.5},marker=None, name='Z component data'), row=2, col='all') + preview_fig.update_yaxes(title={'text':'Z'}, row=2, col=1) + preview_fig.add_trace(go.Scatter(x=iso_times[::dec_factor], y=stream_e[0].data[::dec_factor], + line={'color':'blue', 'width':0.5},marker=None, name='E component data'),row=3, col='all') + preview_fig.update_yaxes(title={'text':'E'}, row=3, col=1) + preview_fig.add_trace(go.Scatter(x=iso_times[::dec_factor], y=stream_n[0].data[::dec_factor], + line={'color':'red', 'width':0.5},marker=None, name='N component data'), row=4, col='all') + preview_fig.update_yaxes(title={'text':'N'}, row=4, col=1) + + #preview_fig.add_trace(p) + preview_fig.update_layout(margin={"l":10, "r":10, "t":30, 'b':0}, showlegend=False, + title=f"{hvsr_data['site']} Data Preview") + + if show_plot_check.value: + preview_fig.show() + + # REMOVE NOISE SUBTAB + # STA/LTA Antitrigger + stalta_check = widgets.Checkbox(value=False, disabled=False, indent=False, description='STA/LTA Antitrigger') + sta = widgets.FloatText(description='STA [s]', style={'description_width': 'initial'}, placeholder=5, value=5,layout=widgets.Layout(height='auto', width='auto')) + lta = widgets.FloatText(description='LTA [s]', style={'description_width': 'initial'}, placeholder=30, value=30,layout=widgets.Layout(height='auto', width='auto')) + stalta_thresh_low = widgets.FloatText(description='STA/LTA Thresholds (low, high)', style={'description_width': 'initial'}, placeholder=0.5, value=0.5,layout=widgets.Layout(height='auto', width='auto')) + stalta_thresh_hi = widgets.FloatText(style={'description_width': 'initial'}, placeholder=5, value=5,layout=widgets.Layout(height='auto', width='auto')) + + #% Saturation Threshold + max_saturation_check = widgets.Checkbox(description='Percentage Threshold (Instantaneous)', value=False, disabled=False, indent=False) + max_saturation_pct = widgets.FloatText(description='Max Saturation %:', style={'description_width': 'initial'}, placeholder=0.995, value=0.995,layout=widgets.Layout(height='auto', width='auto')) + + #Noise Windows + noisy_windows_check = widgets.Checkbox(description='Noisy Windows', value=False, disabled=False, indent=False) + max_window_pct = widgets.FloatText(description='Max Window %:', style={'description_width': 'initial'}, placeholder=0.8, value=0.8,layout=widgets.Layout(height='auto', width='auto')) + noisy_window_length = widgets.FloatText(description='Window Length [s]:', style={'description_width': 'initial'}, placeholder=30, value=30,layout=widgets.Layout(height='auto', width='auto')) + + #Warmup/cooldown + warmcool_check = widgets.Checkbox(description='Warmup & Cooldown Time', value=False, disabled=False, indent=False) + warmup_time = widgets.FloatText(description='Warmup time [s]:', style={'description_width': 'initial'}, placeholder=0, value=0,layout=widgets.Layout(height='auto', width='auto')) + cooldown_time = widgets.FloatText(description='Cooldown time [s]:', style={'description_width': 'initial'}, placeholder=0, 
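+    # These checkboxes map one-to-one onto the remove_noise(remove_method=[...]) entries
+    # assembled in get_remove_noise_kwargs() above: 'stalta' (short-/long-term average
+    # ratio), 'saturation', 'noise' (noisy windows), 'warmcool' (instrument warmup/cooldown).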
value=0,layout=widgets.Layout(height='auto', width='auto')) + + #STD Ratio + std_ratio_check = widgets.Checkbox(description='Standard Deviation Antitrigger (not yet implemented)', value=False, disabled=True, indent=False) + std_ratio_text = widgets.FloatText(description='StdDev Ratio:', style={'description_width': 'initial'}, placeholder=0, value=0,layout=widgets.Layout(height='auto', width='auto'), disabled=True) + std_window_length_text = widgets.FloatText(description='Moving window Length [s]:', style={'description_width': 'initial'}, placeholder=0, value=0,layout=widgets.Layout(height='auto', width='auto'),disabled=True) + + #Autoremove + auto_remove_check = widgets.Checkbox(description='Use Auto Remove', value=False, disabled=False, indent=False) + + #Remove from raw data + raw_data_remove_check = widgets.Checkbox(description='Remove Noise From Raw Data', value=False, disabled=False, indent=False) + + #remove_noise call + remove_noise_prefix = widgets.HTML(value='<style>p {word-wrap: break-word}</style> <p>' + 'remove_noise' + '</p>', + layout=widgets.Layout(width='fill', justify_content='flex-end',align_content='flex-start')) + remove_noise_call = widgets.HTML(value='()') + remove_noise_call_hbox = widgets.HBox([remove_noise_prefix, remove_noise_call]) + + # Update remove_outlier call + def update_remove_noise_call(): + rnkwargs = get_remove_noise_kwargs() + rn_text = f"""(hvsr_data=hvsr_data, remove_method={rnkwargs['remove_method']}, + sat_percent={rnkwargs['sat_percent']}, + noise_percent={rnkwargs['noise_percent']}, + sta={rnkwargs['sta']}, + lta={rnkwargs['lta']}, + stalta_thresh={rnkwargs['stalta_thresh']}, + warmup_time={rnkwargs['warmup_time']}, + cooldown_time={rnkwargs['cooldown_time']}, + min_win_size={rnkwargs['min_win_size']}, + remove_raw_noise={rnkwargs['remove_raw_noise']}, + verbose={verbose_check.value})""" + remove_noise_call.value='<style>p {word-wrap: break-word}</style> <p>' + rn_text + '</p>' + update_remove_noise_call() + + #Update noise windows + update_noise_windows_button = widgets.Button(description='Update Noise Windows',button_style='info',layout=widgets.Layout(height='auto', width='auto'), disabled=True) + + preview_graph_widget = widgets.Output() + #progress bar (same as above) + preview_progress_hbox = widgets.HBox(children=[progress_bar, update_noise_windows_button, process_hvsr_button]) + + # Add it all in to the tab + stalta_hbox = widgets.HBox([stalta_check, sta, lta, stalta_thresh_low, stalta_thresh_hi]) + sat_hbox = widgets.HBox([max_saturation_check, max_saturation_pct]) + noise_win_hbox = widgets.HBox([noisy_windows_check, max_window_pct, noisy_window_length]) + warmcool_hbox = widgets.HBox([warmcool_check, warmup_time, cooldown_time]) + std_ratio_hbox = widgets.HBox([std_ratio_check, std_ratio_text, std_window_length_text]) + spacer_hbox = widgets.HBox([tenpct_spacer]) + + preview_noise_tab = widgets.VBox([stalta_hbox, + sat_hbox, + noise_win_hbox, + warmcool_hbox, + std_ratio_hbox, + auto_remove_check, + raw_data_remove_check, + spacer_hbox, + remove_noise_call_hbox]) + + preview_graph_tab = widgets.VBox(children=[preview_graph_widget]) + preview_subtabs = widgets.Tab([preview_graph_tab, preview_noise_tab]) + preview_tab = widgets.VBox() + + preview_subtabs.set_title(0, "Data Preview") + preview_subtabs.set_title(1, "Noise Removal") + + preview_tab.children = [preview_subtabs, preview_progress_hbox] + # Initialize tab + with preview_graph_widget: + display(preview_fig) + + # SETTINGS TAB + plot_settings_tab = widgets.GridspecLayout(18, 
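+    # An 18-row by ui_width-column grid; cells are filled further down by slice
+    # assignment, e.g. plot_settings_tab[2, 5:10] = use_plot_hv (row 2, columns 5-9).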
ui_width) + settings_progress_hbox = widgets.HBox(children=[progress_bar, tenpct_spacer, process_hvsr_button]) + + # PPSD SETTINGS SUBTAB + ppsd_length_label = widgets.Label(value='Window Length for PPSDs:') + ppsd_length = widgets.FloatText(style={'description_width': 'initial'}, + placeholder=20, value=20,layout=widgets.Layout(height='auto', width='auto'), disabled=False) + + overlap_pct_label = widgets.Label(value='Overlap %:') + overlap_pct = widgets.FloatText(style={'description_width': 'initial'}, + placeholder=0.5, value=0.5, layout=widgets.Layout(height='auto', width='auto'), disabled=False) + + period_step_label = widgets.Label(value='Period Step Octaves:') + period_step_octave = widgets.FloatText(style={'description_width': 'initial'}, + placeholder=0.0625, value=0.0625, layout=widgets.Layout(height='auto', width='auto'), disabled=False) + + skip_on_gaps_label = widgets.Label(value='Skip on gaps:') + skip_on_gaps = widgets.Checkbox(value=False, disabled=False, indent=False) + + db_step_label = widgets.Label(value='dB bins:') + db_bins_min = widgets.FloatText(description='Min. dB', style={'description_width': 'initial'}, + placeholder=-200, value=-200, layout=widgets.Layout(height='auto', width='auto'), disabled=False) + db_bins_max = widgets.FloatText(description='Max. dB', style={'description_width': 'initial'}, + placeholder=-50, value=-50, layout=widgets.Layout(height='auto', width='auto'), disabled=False) + db_bins_step = widgets.FloatText(description='dB Step', style={'description_width': 'initial'}, + placeholder=1, value=1, layout=widgets.Layout(height='auto', width='auto'), disabled=False) + + period_limit_label = widgets.Label(value='Period Limits:') + minPLim = round(1/(hvsr_band_max_box.value), 3) + maxPLim = round(1/(hvsr_band_min_box.value), 3) + period_limits_min = widgets.FloatText(description='Min. Period Limit', style={'description_width': 'initial'}, + placeholder=minPLim, value=minPLim, layout=widgets.Layout(height='auto', width='auto'), disabled=False) + period_limits_max = widgets.FloatText(description='Max. 
Period Limit', style={'description_width': 'initial'}, + placeholder=maxPLim, value=maxPLim, layout=widgets.Layout(height='auto', width='auto'), disabled=False) + period_smoothing_width = widgets.FloatText(description='Period Smoothing Width', style={'description_width': 'initial'}, + placeholder=1, value=1, layout=widgets.Layout(height='auto', width='auto'), disabled=False) + + special_handling_dropdown = widgets.Dropdown(description='Special Handling', value='none', + options=[('None', 'none'), ('Ringlaser', 'ringlaser'), ('Hydrophone', 'hydrophone')], + style={'description_width': 'initial'}, layout=widgets.Layout(height='auto', width='auto'), disabled=False) + + #remove_noise call + generate_ppsd_prefix = widgets.HTML(value='<style>p {word-wrap: break-word}</style> <p>' + 'generate_ppsds' + '</p>', + layout=widgets.Layout(width='fill', justify_content='flex-end',align_content='flex-start')) + generate_ppsd_call = widgets.HTML(value='()') + generate_ppsd_call_hbox = widgets.HBox([generate_ppsd_prefix, generate_ppsd_call]) + + # Update generate_ppsds() call + def update_generate_ppsd_call(): + gppsdkwargs = get_generate_ppsd_kwargs() + gppsd_text = f"""(hvsr_data=hvsr_data, + stats=hvsr_data['stream'].select(component='*').traces[0].stats, + metadata=hvsr_data['paz']['*'], + skip_on_gaps={gppsdkwargs['skip_on_gaps']}, + db_bins={gppsdkwargs['db_bins']}, + ppsd_length={gppsdkwargs['ppsd_length']}, + overlap={gppsdkwargs['overlap']}, + special_handling={gppsdkwargs['special_handling']}, + period_smoothing_width_octaves={gppsdkwargs['period_smoothing_width_octaves']}, + period_step_octaves={gppsdkwargs['period_step_octaves']}, + period_limits={gppsdkwargs['period_limits']}, + verbose={verbose_check.value})""" + generate_ppsd_call.value='<style>p {word-wrap: break-word}</style> <p>' + gppsd_text + '</p>' + update_generate_ppsd_call() + + ppsd_length_hbox = widgets.HBox([ppsd_length_label, ppsd_length]) + overlap_pct_hbox = widgets.HBox([overlap_pct_label, overlap_pct]) + pstep_hbox = widgets.HBox([period_step_label, period_step_octave]) + skipgaps_hbox = widgets.HBox([skip_on_gaps_label, skip_on_gaps]) + db_bins_hbox = widgets.HBox([db_step_label, db_bins_min, db_bins_max, db_bins_step]) + plim_hbox = widgets.HBox([period_limit_label, period_limits_min, period_limits_max, period_smoothing_width]) + + ppsd_settings_tab = widgets.VBox([ppsd_length_hbox, + overlap_pct_hbox, + pstep_hbox, + skipgaps_hbox, + db_bins_hbox, + plim_hbox, + special_handling_dropdown, + generate_ppsd_call_hbox]) + + # OUTLIER SETTINGS SUBTAB + rmse_pctile_check = widgets.Checkbox(description='Using percentile', layout=widgets.Layout(height='auto', width='auto'), style={'description_width': 'initial'}, value=True) + rmse_thresh = widgets.FloatText(description='RMSE Threshold', style={'description_width': 'initial'}, + placeholder=98, value=98, layout=widgets.Layout(height='auto', width='auto'), disabled=False) + use_hv_curve_rmse = widgets.Checkbox(description='Use HV Curve Outliers (may only be used after they have been calculated during the process_hvsr() step))', layout=widgets.Layout(height='auto', width='auto'), style={'description_width': 'initial'}, value=False, disabled=True) + + outlier_threshbox_hbox = widgets.HBox(children=[rmse_thresh, rmse_pctile_check]) + outlier_params_vbox = widgets.VBox(children=[outlier_threshbox_hbox, use_hv_curve_rmse]) + + global outlier_fig + outlier_fig = go.FigureWidget() + outlier_graph_widget = widgets.Output() + + outlier_thresh_slider_label = widgets.Label(value='RMSE 
Thresholds:') + rmse_thresh_slider = widgets.FloatSlider(value=0, min=0, max=100, step=0.1,description='RMSE Value',layout=widgets.Layout(height='auto', width='auto'),disabled=True) + rmse_pctile_slider = widgets.FloatSlider(value=get_default(sprit_hvsr.remove_outlier_curves, 'rmse_thresh'), min=0, max=100, step=0.1, description="Percentile",layout=widgets.Layout(height='auto', width='auto'),) + + def calc_rmse(array_2d): + medArray = np.nanmedian(array_2d, axis=0) + rmse = np.sqrt(((np.subtract(array_2d, medArray)**2).sum(axis=1))/array_2d.shape[1]) + return rmse + + def on_update_rmse_thresh_slider(change): + if use_hv_curve_rmse.value: + rmse = calc_rmse(np.stack(hvsr_data.hvsr_windows_df['HV_Curves'])) + else: + rmsez = calc_rmse(np.stack(hvsr_data.hvsr_windows_df['psd_values_Z'])) + rmsee = calc_rmse(np.stack(hvsr_data.hvsr_windows_df['psd_values_E'])) + rmsen = calc_rmse(np.stack(hvsr_data.hvsr_windows_df['psd_values_N'])) + + rmse = np.stack([rmsez, rmsee, rmsen]).flatten() + + if rmse_pctile_check.value: + rmse_thresh.value = rmse_pctile_slider.value + else: + rmse_thresh.value = rmse_thresh_slider.value + rmse_pctile_slider.value = ((rmse < rmse_thresh_slider.value).sum() / len(rmse)) * 100 + + def on_update_rmse_pctile_slider(change): + if use_hv_curve_rmse.value: + rmse = calc_rmse(np.stack(hvsr_data.hvsr_windows_df['HV_Curves'])) + else: + rmsez = calc_rmse(np.stack(hvsr_data.hvsr_windows_df['psd_values_Z'])) + rmsee = calc_rmse(np.stack(hvsr_data.hvsr_windows_df['psd_values_E'])) + rmsen = calc_rmse(np.stack(hvsr_data.hvsr_windows_df['psd_values_N'])) + + rmse = np.stack([rmsez, rmsee, rmsen]) + + if rmse_pctile_check.value: + rmse_thresh_slider.value = np.percentile(rmse, rmse_pctile_slider.value) + rmse_thresh.value = rmse_pctile_slider.value + else: + rmse_thresh.value = rmse_thresh_slider.value + + def on_update_rmse_pctile_check(change): + if rmse_pctile_check.value: + rmse_pctile_slider.disabled = False + rmse_thresh_slider.disabled = True + else: + rmse_pctile_slider.disabled = True + rmse_thresh_slider.disabled = False + + def on_update_rmse_thresh(change): + if rmse_pctile_check.value: + rmse_pctile_slider.value = rmse_thresh.value + else: + rmse_thresh_slider.value = rmse_thresh.value + + rmse_pctile_check.observe(on_update_rmse_pctile_check) + rmse_thresh_slider.observe(on_update_rmse_thresh_slider) + rmse_pctile_slider.observe(on_update_rmse_pctile_slider) + rmse_thresh.observe(on_update_rmse_thresh) + + use_hv_curve_label = widgets.Label(value='NOTE: Outlier curves may only be identified after PPSDs have been calculated (during the generate_ppsds() step)', layout=widgets.Layout(height='auto', width='80%')) + generate_ppsd_button = widgets.Button(description='Generate PPSDs', layout=widgets.Layout(height='auto', width='20%', justify_content='flex-end'), disabled=False) + update_outlier_plot_button = widgets.Button(description='Remove Outliers', layout=widgets.Layout(height='auto', width='20%', justify_content='flex-end'), disabled=False) + outlier_ppsd_hbox = widgets.HBox([use_hv_curve_label, generate_ppsd_button, update_outlier_plot_button]) + remove_outlier_curve_prefix = widgets.HTML(value='<style>p {word-wrap: break-word}</style> <p>' + 'remove_outlier_curves' + '</p>', + layout=widgets.Layout(width='fill', justify_content='flex-end',align_content='flex-start')) + remove_outlier_curve_call = widgets.HTML(value='()') + remove_outlier_hbox = widgets.HBox([remove_outlier_curve_prefix, remove_outlier_curve_call]) + + # Update remove_outlier call + def 
update_remove_outlier_curve_call(): + roc_text = f"""(hvsr_data=hvsr_data, rmse_thresh={rmse_thresh.value}, use_percentile={rmse_pctile_check.value}, + use_hv_curve={use_hv_curve_rmse.value}...verbose={verbose_check.value})""" + remove_outlier_curve_call.value='<style>p {word-wrap: break-word}</style> <p>' + roc_text + '</p>' + update_remove_outlier_curve_call() + + def update_outlier_fig_button(button): + outlier_fig, hvsr_data = update_outlier_fig(button) + + generate_ppsd_button.on_click(process_data) + + update_outlier_plot_button.on_click(update_outlier_fig_button) + + outlier_settings_tab = widgets.VBox(children=[outlier_params_vbox, + outlier_graph_widget, + outlier_thresh_slider_label, + rmse_thresh_slider, + rmse_pctile_slider, + outlier_ppsd_hbox, + remove_outlier_hbox]) + + with outlier_graph_widget: + display(outlier_fig) + + def update_outlier_fig(input=None, _rmse_thresh=rmse_pctile_slider.value, _use_percentile=True, _use_hv_curve=use_hv_curve_rmse.value, _verbose=verbose_check.value): + global outlier_fig + global hvsr_data + hv_data = hvsr_data + + + roc_kwargs = {'rmse_thresh':rmse_pctile_slider.value, + 'use_percentile':True, + 'use_hv_curve':use_hv_curve_rmse.value, + 'plot_engine':'plotly', + 'show_plot':False, + 'verbose':verbose_check.value + } + if 'PPSDStatus' in hvsr_data.ProcessingStatus.keys() and hvsr_data.ProcessingStatus['PPSDStatus']: + log_textArea.value += f"\n\n{datetime.datetime.now()}\nremove_outlier_curves():\n'{roc_kwargs}" + hvsr_data = sprit_hvsr.remove_outlier_curves(hvsr_data, **roc_kwargs) + else: + log_textArea.value += f"\n\n{datetime.datetime.now()}\nremove_outlier_curves() attempted, but not completed. hvsr_data.ProcessingStatus['PPSDStatus']=False\n'{roc_kwargs}" + return outlier_fig, hvsr_data + + if roc_kwargs['use_hv_curve']: + no_subplots = 1 + if hasattr(hvsr_data, 'hvsr_windows_df') and 'HV_Curves' in hvsr_data.hvsr_windows_df.columns: + outlier_fig.data = [] + outlier_fig.update_layout(grid=None) # Clear the existing grid layout + outlier_subp = subplots.make_subplots(rows=no_subplots, cols=1, horizontal_spacing=0.01, vertical_spacing=0.1) + outlier_fig.update_layout(grid={'rows': 1}) + outlier_fig = go.FigureWidget(outlier_subp) + + x_data = hvsr_data['x_freqs'] + curve_traces = [] + for hv in hvsr_data.hvsr_windows_df['HV_Curves'].iterrows(): + curve_traces.append(go.Scatter(x=x_data, y=hv[1])) + outlier_fig.add_traces(curve_traces) + + # Calculate a median curve, and reshape so same size as original + medCurve = np.nanmedian(np.stack(hvsr_data.hvsr_windows_df['HV_Curves']), axis=0) + outlier_fig.add_trace(go.Scatter(x=x_data, y=medCurve, line=dict(color='rgba(0,0,0,1)', width=1.5),showlegend=False)) + + minY = np.nanmin(np.stack(hvsr_data.hvsr_windows_df['HV_Curves'])) + maxY = np.nanmax(np.stack(hvsr_data.hvsr_windows_df['HV_Curves'])) + totalWindows = hvsr_data.hvsr_windows_df.shape[0] + #medCurveArr = np.tile(medCurve, (curr_data.shape[0], 1)) + + else: + no_subplots = 3 + outlier_fig.data = [] + outlier_fig.update_layout(grid=None) # Clear the existing grid layout + outlier_subp = subplots.make_subplots(rows=no_subplots, cols=1, horizontal_spacing=0.01, vertical_spacing=0.02, + row_heights=[1, 1, 1]) + outlier_fig.update_layout(grid={'rows': 3}) + outlier_fig = go.FigureWidget(outlier_subp) + + if hasattr(hvsr_data, 'hvsr_windows_df'): + rowDict = {'Z':1, 'E':2, 'N':3} + showTLabelsDict={'Z':False, 'E':False, 'N':True} + def comp_rgba(comp, a): + compstr = '' + if comp=='Z': + compstr = f'rgba(0, 0, 0, {a})' + if comp=='E': + 
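+                        # Outlier sketch: below, each window's RMSE against the median curve,
+                        # RMSE_j = sqrt(mean_f((curve_j(f) - median(f))**2)), is compared to a
+                        # percentile threshold; curves above it are drawn dashed and their
+                        # window is flagged Use=False.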
compstr = f'rgba(50, 50, 250, {a})' + if comp=='N': + compstr = f'rgba(250, 50, 50, {a})' + return compstr + compNames = ['Z', 'E', 'N'] + rmse_to_plot=[] + med_traces=[] + + noRemoved = 0 + indRemoved = [] + for i, comp in enumerate(compNames): + if hasattr(hvsr_data, 'x_freqs'): + x_data = hvsr_data['x_freqs'][comp] + else: + x_data = [1/p for p in hvsr_data['ppsds'][comp]['period_xedges'][1:]] + column = 'psd_values_'+comp + # Retrieve data from dataframe (use all windows, just in case) + curr_data = np.stack(hvsr_data['hvsr_windows_df'][column]) + + # Calculate a median curve, and reshape so same size as original + medCurve = np.nanmedian(curr_data, axis=0) + medCurveArr = np.tile(medCurve, (curr_data.shape[0], 1)) + medTrace = go.Scatter(x=x_data, y=medCurve, line=dict(color=comp_rgba(comp, 1), width=1.5), + name=f'{comp} Component', showlegend=True) + # Calculate RMSE + rmse = np.sqrt(((np.subtract(curr_data, medCurveArr)**2).sum(axis=1))/curr_data.shape[1]) + + rmse_threshold = np.percentile(rmse, roc_kwargs['rmse_thresh']) + + # Retrieve index of those RMSE values that lie outside the threshold + timeIndex = hvsr_data['hvsr_windows_df'].index + for j, curve in enumerate(curr_data): + if rmse[j] > rmse_threshold: + badTrace = go.Scatter(x=x_data, y=curve, + line=dict(color=comp_rgba(comp, 1), width=1.5, dash='dash'), + #marker=dict(color=comp_rgba(comp, 1), size=3), + name=str(hvsr_data.hvsr_windows_df.index[j]), showlegend=False) + outlier_fig.add_trace(badTrace, row=rowDict[comp], col=1) + if j not in indRemoved: + indRemoved.append(j) + noRemoved += 1 + else: + goodTrace = go.Scatter(x=x_data, y=curve, + line=dict(color=comp_rgba(comp, 0.01)), name=str(hvsr_data.hvsr_windows_df.index[j]), showlegend=False) + outlier_fig.add_trace(goodTrace, row=rowDict[comp], col=1) + + timeIndRemoved = pd.DatetimeIndex([timeIndex[ind] for ind in indRemoved]) + hvsr_data['hvsr_windows_df'].loc[timeIndRemoved, 'Use'] = False + + outlier_fig.add_trace(medTrace, row=rowDict[comp], col=1) + + outlier_fig.update_xaxes(showticklabels=False, row=1, col=1) + outlier_fig.update_yaxes(title={'text':'Z'}, row=1, col=1) + outlier_fig.update_xaxes(showticklabels=False, row=2, col=1) + outlier_fig.update_yaxes(title={'text':'E'}, row=2, col=1) + outlier_fig.update_xaxes(showticklabels=True, row=3, col=1) + outlier_fig.update_yaxes(title={'text':'N'}, row=3, col=1) + + outlier_fig.update_layout(margin={"l":10, "r":10, "t":30, 'b':0}, showlegend=True, + title=f"{hvsr_data['site']} Outliers") + if comp == 'N': + minY = np.nanmin(curr_data) + maxY = np.nanmax(curr_data) + totalWindows = curr_data.shape[0] + + outlier_fig.add_annotation( + text=f"{len(indRemoved)}/{totalWindows} outlier windows removed", + x=np.log10(max(x_data)) - (np.log10(max(x_data))-np.log10(min(x_data))) * 0.01, + y=minY+(maxY-minY)*0.01, + xanchor="right", yanchor="bottom",#bgcolor='rgba(256,256,256,0.7)', + showarrow=False,row=no_subplots, col=1) + + + outlier_fig.update_xaxes(type='log') + with outlier_graph_widget: + clear_output(wait=True) + display(outlier_fig) + + if show_plot_check.value: + outlier_fig.show() + + return outlier_fig, hvsr_data + + # HVSR SETTINGS SUBTAB + h_combine_meth = widgets.Dropdown(description='Horizontal Combination Method', value=3, + options=[('1. Differential Field Assumption (not implemented)', 1), + ('2. Arithmetic Mean | H = (N + E)/2', 2), + ('3. Geometric Mean | H = √(N * E) (SESAME recommended)', 3), + ('4. Vector Summation | H = √(N^2 + E^2)', 4), + ('5. 
Quadratic Mean | H = √(N^2 + E^2)/2', 5), + ('6. Maximum Horizontal Value | H = max(N, E)', 6)], + style={'description_width': 'initial'}, layout=widgets.Layout(height='auto', width='auto'), disabled=False) + + freq_smoothing = widgets.Dropdown(description='Frequency Smoothing Operations', value='konno ohmachi', + options=[('Konno-Ohmachi', 'konno ohmachi'), + ('Constant','constant'), + ('Proportional', 'proportional'), + ('None', None)], + style={'description_width': 'initial'}, layout=widgets.Layout(height='auto', width='auto'), disabled=False) + freq_smooth_width = widgets.FloatText(description='Freq. Smoothing Width', style={'description_width': 'initial'}, + placeholder=40, value=40, layout=widgets.Layout(height='auto', width='auto'), disabled=False) + + resample_hv_curve_bool = widgets.Checkbox(layout=widgets.Layout(height='auto', width='auto'), style={'description_width': 'initial'}, value=True) + resample_hv_curve = widgets.IntText(description='Resample H/V Curve', style={'description_width': 'initial'}, + placeholder=500, value=500, layout=widgets.Layout(height='auto', width='auto'), disabled=False) + + smooth_hv_curve_bool = widgets.Checkbox(layout=widgets.Layout(height='auto', width='auto'), style={'description_width': 'initial'}, value=True) + smooth_hv_curve = widgets.IntText(description='Smooth H/V Curve', style={'description_width': 'initial'}, + placeholder=51, value=51, layout=widgets.Layout(height='auto', width='auto'), disabled=False) + + hvsr_band_hbox_hvsrSet = widgets.HBox([hvsr_band_min_box, hvsr_band_max_box],layout=widgets.Layout(height='auto', width='auto')) + + peak_freq_range_hbox_hvsrSet = widgets.HBox([peak_freq_range_min_box, peak_freq_range_max_box],layout=widgets.Layout(height='auto', width='auto')) + + peak_selection_type = widgets.Dropdown(description='Peak Selection Method', value='max', + options=[('Highest Peak', 'max'), + ('Best Scored','scored')], + style={'description_width': 'initial'}, layout=widgets.Layout(height='auto', width='auto'), disabled=False) + + process_hvsr_call_prefix = widgets.HTML(value='<style>p {word-wrap: break-word}</style> <p>' + 'process_hvsr' + '</p>', + layout=widgets.Layout(width='fill', justify_content='flex-end', align_content='flex-start')) + process_hvsr_call = widgets.HTML(value='()') + process_hvsr_call_hbox = widgets.HBox([process_hvsr_call_prefix, process_hvsr_call]) + + # Update process_hvsr call + def update_process_hvsr_call(): + ph_kwargs = get_process_hvsr_kwargs() + ph_text = f"""(hvsr_data=hvsr_data, + method={ph_kwargs['method']}, + smooth={ph_kwargs['smooth']}, + freq_smooth={ph_kwargs['freq_smooth']}, + f_smooth_width={ph_kwargs['f_smooth_width']}, + resample={ph_kwargs['resample']}, + outlier_curve_rmse_percentile={ph_kwargs['outlier_curve_rmse_percentile']}, + verbose={verbose_check.value})""" + process_hvsr_call.value='<style>p {word-wrap: break-word}</style> <p>' + ph_text + '</p>' + update_process_hvsr_call() + + check_peaks_call_prefix = widgets.HTML(value='<style>p {word-wrap: break-word}</style> <p>'+'check_peaks' + '</p>', + layout=widgets.Layout(width='fill', justify_content='flex-end',align_content='flex-start')) + check_peaks_call = widgets.HTML(value='()') + check_peaks_call_hbox = widgets.HBox([check_peaks_call_prefix, check_peaks_call]) + + # Update process_hvsr call + def update_check_peaks_call(): + cp_kwargs = get_check_peaks_kwargs() + cp_text = f"""(hvsr_data=hvsr_data, + hvsr_band={cp_kwargs['hvsr_band']}, + peak_selection={cp_kwargs['peak_selection']}, + 
peak_freq_range={cp_kwargs['peak_freq_range']}, + verbose={verbose_check.value})""" + check_peaks_call.value='<style>p {word-wrap: break-word}</style> <p>' + cp_text + '</p>' + update_check_peaks_call() + + freq_smooth_hbox = widgets.HBox([freq_smoothing, freq_smooth_width]) + resample_hbox = widgets.HBox([resample_hv_curve_bool, resample_hv_curve]) + smooth_hbox = widgets.HBox([smooth_hv_curve_bool, smooth_hv_curve]) + + # Set up vbox for hvsr_settings subtab + hvsr_settings_tab = widgets.VBox([h_combine_meth, + freq_smooth_hbox, + resample_hbox, + smooth_hbox, + hvsr_band_hbox_hvsrSet, + peak_freq_range_hbox_hvsrSet, + peak_selection_type, + process_hvsr_call_hbox, + check_peaks_call_hbox]) + + # PLOT SETTINGS SUBTAB + hv_plot_label = widgets.Label(value='HVSR Plot', layout=widgets.Layout(height='auto', width='auto', justify_content='center')) + component_plot_label = widgets.Label(value='Component Plot', layout=widgets.Layout(height='auto', width='auto', justify_content='center')) + spec_plot_label = widgets.Label(value='Spectrogram Plot', layout=widgets.Layout(height='auto', width='auto', justify_content='center')) + + use_plot_label = widgets.Label(value='Use Plot', layout=widgets.Layout(height='auto', width='auto', justify_content='flex-end', align_items='center')) + use_plot_hv = widgets.Checkbox(value=True, layout=widgets.Layout(height='auto', width='auto', justify_content='center', align_items='center'), + style={'description_width': 'initial'}) + use_plot_comp = widgets.Checkbox(value=True, layout=widgets.Layout(height='auto', width='auto', justify_content='center', align_items='center'), + style={'description_width': 'initial'}) + use_plot_spec = widgets.Checkbox(value=True, layout=widgets.Layout(height='auto', width='auto', justify_content='center', align_items='center'), + style={'description_width': 'initial'}) + + comibne_plot_label = widgets.Label(value='Combine HV and Comp. 
Plot', layout=widgets.Layout(height='auto', width='auto', justify_content='flex-end', align_items='center')) + combine_hv_comp = widgets.Checkbox(value=False, layout=widgets.Layout(height='auto', width='auto', justify_content='center', align_items='center'), + style={'description_width': 'initial'}) + + show_peak_label = widgets.Label(value='Show Best Peak', layout=widgets.Layout(height='auto', width='auto', justify_content='flex-end', align_items='center')) + show_best_peak_hv = widgets.Checkbox(value=True, layout=widgets.Layout(height='auto', width='auto', justify_content='center', align_items='center'), + style={'description_width': 'initial'}) + show_best_peak_comp = widgets.Checkbox(value=True, layout=widgets.Layout(height='auto', width='auto', justify_content='center', align_items='center'), + style={'description_width': 'initial'}) + show_best_peak_spec = widgets.Checkbox(value=False, layout=widgets.Layout(height='auto', width='auto', justify_content='center', align_items='center'), + style={'description_width': 'initial'}) + + annotate_peak_label = widgets.Label(value='Annotate Best Peak', layout=widgets.Layout(height='auto', width='auto', justify_content='flex-end', align_items='center')) + ann_best_peak_hv = widgets.Checkbox(value=True, layout=widgets.Layout(height='auto', width='auto', justify_content='center', align_items='center'), + style={'description_width': 'initial'}) + ann_best_peak_comp = widgets.Checkbox(value=False, layout=widgets.Layout(height='auto', width='auto', justify_content='center', align_items='center'), + style={'description_width': 'initial'}) + ann_best_peak_spec = widgets.Checkbox(value=True, layout=widgets.Layout(height='auto', width='auto', justify_content='center', align_items='center'), + style={'description_width': 'initial'}) + + show_all_peaks_label = widgets.Label(value='Show All Peaks', layout=widgets.Layout(height='auto', width='auto', justify_content='flex-end', align_items='center')) + show_all_peaks_hv = widgets.Checkbox(value=False, layout=widgets.Layout(height='auto', width='auto', justify_content='center', align_items='center'), + style={'description_width': 'initial'}) + + show_all_curves_label = widgets.Label(value='Show All Curves', layout=widgets.Layout(height='auto', width='auto', justify_content='flex-end', align_items='center')) + show_all_curves_hv = widgets.Checkbox(value=False, layout=widgets.Layout(height='auto', width='auto', justify_content='center', align_items='center'), + style={'description_width': 'initial'}) + show_all_curves_comp = widgets.Checkbox(value=False, layout=widgets.Layout(height='auto', width='auto', justify_content='center', align_items='center'), + style={'description_width': 'initial'}) + + show_ind_peaks_label = widgets.Label(value='Show Individual Peaks', layout=widgets.Layout(height='auto', width='auto', justify_content='flex-end', align_items='center')) + show_ind_peaks_hv = widgets.Checkbox(value=False, layout=widgets.Layout(height='auto', width='auto', justify_content='center', align_items='center'), + style={'description_width': 'initial'}) + show_ind_peaks_spec = widgets.Checkbox(value=False, layout=widgets.Layout(height='auto', width='auto', justify_content='center', align_items='center'), + style={'description_width': 'initial'}) + + show_std_label = widgets.Label(value='Show Standard Deviation', layout=widgets.Layout(height='auto', width='auto', justify_content='flex-end', align_items='center')) + show_std_hv = widgets.Checkbox(value=True, layout=widgets.Layout(height='auto', width='auto', 
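+    # Each checkbox in this subtab toggles one token of the plot string assembled in
+    # get_formatted_plot_str(): 'p' best peak, 'ann' peak annotation, 'all' all checked
+    # peaks, 't' per-window curves, 'tp' per-window peaks, '-s' hide st.dev. band, 'leg' legend.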
justify_content='center', align_items='center'), + style={'description_width': 'initial'}) + show_std_comp = widgets.Checkbox(value=True, layout=widgets.Layout(height='auto', width='auto', justify_content='center', align_items='center'), + style={'description_width': 'initial'}) + + show_legend_label = widgets.Label(value='Show Legend', layout=widgets.Layout(height='auto', width='auto', justify_content='flex-end', align_items='center')) + show_legend_hv = widgets.Checkbox(value=False, layout=widgets.Layout(height='auto', width='auto', justify_content='center', align_items='center'), + style={'description_width': 'initial'}) + show_legend_comp = widgets.Checkbox(value=False, layout=widgets.Layout(height='auto', width='auto', justify_content='center', align_items='center'), + style={'description_width': 'initial'}) + show_legend_spec = widgets.Checkbox(value=False, layout=widgets.Layout(height='auto', width='auto', justify_content='center', align_items='center'), + style={'description_width': 'initial'}) + + x_type_label = widgets.Label(value='X Type', layout=widgets.Layout(height='auto', width='auto', justify_content='flex-end', align_items='center')) + x_type = widgets.Dropdown(options=[('Frequency', 'freq'), ('Period', 'period')], + layout=widgets.Layout(height='auto', width='auto'), style={'description_width': 'initial'}) + + plotly_kwargs_label = widgets.Label(value='Plotly Kwargs', layout=widgets.Layout(height='auto', width='auto', justify_content='flex-end', align_items='center')) + plotly_kwargs = widgets.Text(style={'description_width': 'initial'}, + layout=widgets.Layout(height='auto', width='auto'), disabled=False) + + mpl_kwargs_label = widgets.Label(value='Matplotlib Kwargs', layout=widgets.Layout(height='auto', width='auto', justify_content='flex-end', align_items='center')) + mpl_kwargs = widgets.Text(style={'description_width': 'initial'}, + layout=widgets.Layout(height='auto', width='auto'), disabled=False) + + plot_hvsr_call = widgets.Label(value=f"Plot String: '{get_default(sprit_hvsr.get_report, 'plot_type')}'") + def update_plot_string(): + plot_hvsr_text = f"""Plot String: {get_get_report_kwargs()['plot_type']}""" + plot_hvsr_call.value = plot_hvsr_text + update_plot_string() + + update_plot_button = widgets.Button(description='Update Plot',button_style='info',layout=widgets.Layout(height='auto', width='auto')) + def manually_update_results_fig(change): + plot_string = get_get_report_kwargs()['plot_type'] + update_results_fig(hvsr_results, plot_string) + sprit_tabs.selected_index = 4 + + # Set up grid for ppsd_settings subtab + plot_settings_tab[0, 5:10] = hv_plot_label + plot_settings_tab[0, 10:15] = component_plot_label + plot_settings_tab[0, 15:] = spec_plot_label + + plot_settings_tab[1, :] = widgets.HTML('<hr>', layout=widgets.Layout(height='auto', width='auto', justify_content='center', align_items='center')) + + plot_settings_tab[2, :5] = use_plot_label + plot_settings_tab[2, 5:10] = use_plot_hv + plot_settings_tab[2, 10:15] = use_plot_comp + plot_settings_tab[2, 15:] = use_plot_spec + + plot_settings_tab[3, :] = widgets.HTML('<hr>', layout=widgets.Layout(height='auto', width='auto', justify_content='center', align_items='center')) + + plot_settings_tab[4, :5] = comibne_plot_label + plot_settings_tab[4, 10:15] = combine_hv_comp + + plot_settings_tab[5, :5] = show_peak_label + plot_settings_tab[5, 5:10] = show_best_peak_hv + plot_settings_tab[5, 10:15] = show_best_peak_comp + plot_settings_tab[5, 15:] = show_best_peak_spec + + plot_settings_tab[6, :5] = 
annotate_peak_label + plot_settings_tab[6, 5:10] = ann_best_peak_hv + plot_settings_tab[6, 10:15] = ann_best_peak_comp + plot_settings_tab[6, 15:] = ann_best_peak_spec + + plot_settings_tab[7, :5] = show_all_peaks_label + plot_settings_tab[7, 5:10] = show_all_peaks_hv + + plot_settings_tab[8, :5] = show_all_curves_label + plot_settings_tab[8, 5:10] = show_all_curves_hv + plot_settings_tab[8, 10:15] = show_all_curves_comp + + plot_settings_tab[9, :5] = show_ind_peaks_label + plot_settings_tab[9, 5:10] = show_ind_peaks_hv + plot_settings_tab[9, 15:] = show_ind_peaks_spec + + plot_settings_tab[10, :5] = show_std_label + plot_settings_tab[10, 5:10] = show_std_hv + plot_settings_tab[10, 10:15] = show_std_comp + + plot_settings_tab[11, :5] = show_legend_label + plot_settings_tab[11, 5:10] = show_legend_hv + plot_settings_tab[11, 10:15] = show_legend_comp + plot_settings_tab[11, 15:] = show_legend_spec + + plot_settings_tab[12, :] = widgets.HTML('<hr>', layout=widgets.Layout(height='auto', width='auto', justify_content='center', align_items='center')) + + plot_settings_tab[13, :5] = x_type_label + plot_settings_tab[13, 6:] = x_type + + plot_settings_tab[14, :5] = plotly_kwargs_label + plot_settings_tab[14, 6:] = plotly_kwargs + + plot_settings_tab[15, :5] = mpl_kwargs_label + plot_settings_tab[15, 6:] = mpl_kwargs + + plot_settings_tab[16, :] = widgets.HTML('<hr>', layout=widgets.Layout(height='auto', width='auto', justify_content='center', align_items='center')) + + plot_settings_tab[17, :18] = plot_hvsr_call + plot_settings_tab[17, 18:] = update_plot_button + update_plot_button.on_click(manually_update_results_fig) + + # Place everything in Settings Tab + settings_subtabs = widgets.Tab([ppsd_settings_tab, hvsr_settings_tab, outlier_settings_tab, plot_settings_tab]) + settings_tab = widgets.VBox(children=[settings_subtabs, settings_progress_hbox]) + settings_subtabs.set_title(0, "PPSD Settings") + settings_subtabs.set_title(1, "HVSR Settings") + settings_subtabs.set_title(2, "Outlier Settings") + settings_subtabs.set_title(3, "Plot Settings") + + # LOG TAB - not currently using + log_tab = widgets.VBox(children=[log_textArea]) + #log_textArea = widgets.Textarea(value="SESSION LOG", disabled=True, layout={'height': '99%','width': '99%', 'overflow': 'scroll'}) + + # RESULTS TAB + # PLOT SUBTAB + global results_subp + results_subp = subplots.make_subplots(rows=3, cols=1, horizontal_spacing=0.01, vertical_spacing=0.01, row_heights=[2,1,1]) + results_fig = go.FigureWidget(results_subp) + global results_graph_widget + results_graph_widget = widgets.Output() + + with results_graph_widget: + display(results_fig) + + global printed_results_textArea + printed_results_textArea = widgets.Textarea(value="RESULTS", disabled=True, layout={'height': '500px','width': '99%', 'overflow': 'scroll'}) + + global results_table + initialTableCols=['SiteName', 'Acq_Date', 'Longitude', 'Latitude', 'Elevation', + 'PeakFrequency', 'WindowLengthFreq.', 'SignificantCycles', 'LowCurveStDevOverTime', + 'PeakProminenceBelow', 'PeakProminenceAbove', 'PeakAmpClarity', + 'FreqStability', 'PeakStability_FreqStD', 'PeakStability_AmpStD', 'PeakPasses'] + results_table = widgets.HTML(value=pd.DataFrame(columns=initialTableCols).to_html()) + + # A text box labeled Data Filepath + export_results_table_filepath = widgets.Text(description='Export Filepath:', + placeholder='', value='', + style={'description_width': 'initial'},layout=widgets.Layout(width='90%')) + + export_results_table_read_button = widgets.Button(description='', 
icon='fa-file-import',button_style='success', + layout=widgets.Layout(width='10%')) + export_results_table_browse_button = widgets.Button(description='Export Table', + layout=widgets.Layout(width='10%')) + def export_results_table(button): + try: + if button.description == 'Export Table': + root = tk.Tk() + root.wm_attributes('-topmost', True) + root.withdraw() + export_results_table_filepath.value = str(filedialog.asksaveasfilename(defaultextension='.csv', title='Save CSV Report')) + root.destroy() + except Exception as e: + print(e) + export_results_table_browse_button.disabled=True + export_results_table_browse_button.description='Use Text Field' + + out_path = export_results_table_filepath.value + sprit_hvsr.get_report(hvsr_results, report_format='csv', export_path=out_path, + csv_overwrite_opt='overwrite') + + export_results_table_browse_button.on_click(export_results_table) + export_results_table_read_button.on_click(export_results_table) + + results_table_export_hbox = widgets.HBox([export_results_table_filepath, export_results_table_read_button, export_results_table_browse_button]) + results_table_vbox = widgets.VBox([results_table, results_table_export_hbox]) + global results_tab + results_subtabs = widgets.Tab([results_graph_widget, printed_results_textArea, results_table_vbox]) + results_tab = widgets.VBox(children=[results_subtabs]) + results_subtabs.set_title(0, "Plot") + results_subtabs.set_title(1, "Peak Tests") + results_subtabs.set_title(2, "Peak Table") + + widget_param_dict = { + 'fetch_data': + {'source': data_source_type, + 'trim_dir': trim_directory, + 'export_format': trim_export_dropdown, + 'detrend': detrend_type_dropdown, + 'detrend_order': detrend_order, + 'verbose': verbose_check}, + 'remove_noise': + { + 'sat_percent': max_saturation_pct, + 'noise_percent': max_window_pct, + 'sta': sta, + 'lta': lta, + 'stalta_thresh': [stalta_thresh_low, stalta_thresh_hi], + 'warmup_time': warmup_time, + 'cooldown_time': cooldown_time, + 'min_win_size': noisy_window_length, + 'remove_raw_noise': raw_data_remove_check, + 'verbose': verbose_check}, + 'generate_ppsds': + {'verbose': verbose_check, + 'skip_on_gaps':skip_on_gaps, + 'db_bins':[db_bins_min, db_bins_max, db_bins_step], + 'ppsd_length':ppsd_length, + 'overlap':overlap_pct, + 'special_handling':special_handling_dropdown, + 'period_smoothing_width_octaves':period_smoothing_width, + 'period_step_octaves':period_step_octave, + 'period_limits':[hvsr_band_min_box, hvsr_band_max_box]}, + 'process_hvsr': + {'method': h_combine_meth, + 'smooth': smooth_hv_curve, + 'freq_smooth': freq_smoothing, + 'f_smooth_width': freq_smooth_width, + 'resample': resample_hv_curve, + 'verbose': verbose_check}, + 'remove_outlier_curves': + {'rmse_thresh': rmse_thresh, + 'use_percentile': rmse_pctile_check, + 'use_hv_curve': use_hv_curve_rmse, + 'verbose': verbose_check}, + 'check_peaks': + {'hvsr_band': [hvsr_band_min_box, hvsr_band_max_box], + 'peak_freq_range': [peak_freq_range_min_box, peak_freq_range_max_box], + 'verbose': verbose_check}, + 'get_report': + { + 'export_path': export_results_table_filepath, + 'verbose': verbose_check}} + + # SPRIT WIDGET + # Add all tabs to the main SpRIT widget + global sprit_tabs + sprit_tabs = widgets.Tab([input_tab, preview_tab, settings_tab, log_tab, results_tab]) + sprit_tabs.set_title(0, "Input") + sprit_tabs.set_title(1, "Preview") + sprit_tabs.set_title(2, "Settings") + sprit_tabs.set_title(3, "Log") + sprit_tabs.set_title(4, "Results") + + sprit_title = widgets.Label(value='SPRIT',
layout=widgets.Layout(width='150px')) + sprit_subtitle = widgets.Label(value='Tools for ambient seismic noise analysis using HVSR', + layout=widgets.Layout(flex='1', justify_content='flex-start', align_content='flex-end')) + + # Function to open a link + def open_dist(button): + link = 'https://pypi.org/project/sprit/' + webbrowser.open_new_tab(link) + + def open_repo(button): + link = 'https://github.com/RJbalikian/SPRIT-HVSR' + webbrowser.open_new_tab(link) + + def open_docs(button): + link = 'https://rjbalikian.github.io/SPRIT-HVSR/main.html' + webbrowser.open_new_tab(link) + + sourcebutton = widgets.Button(description="PyPI", + layout=widgets.Layout(width='4%', justify_content='flex-end',align_content='flex-end')) + repobutton = widgets.Button(description="Repo", + layout=widgets.Layout(width='4%', justify_content='flex-end',align_content='flex-end')) + docsbutton = widgets.Button(description="Docs", + layout=widgets.Layout(width='8%', justify_content='flex-end',align_content='flex-end')) + + # Attach the open_link function to the button's on_click event + sourcebutton.on_click(open_dist) + repobutton.on_click(open_repo) + docsbutton.on_click(open_docs) + + titlehbox = widgets.HBox([sprit_title, sprit_subtitle, repobutton, sourcebutton, docsbutton], + layout = widgets.Layout(align_content='space-between')) + + title_style = { + 'font_family': 'Arial, sans-serif', + 'font_size': '36px', + 'font_weight': 'bold', + 'color': 'black' + } + + # Apply the style to the label + sprit_title.style = title_style + + sprit_widget = widgets.VBox([titlehbox, sprit_tabs]) + + def observe_children(widget, callback): + if hasattr(widget, 'children'): + for child in widget.children: + child.observe(callback) + observe_children(child, callback) + + def any_update(change): + update_input_param_call() + update_fetch_data_call() + update_remove_noise_call() + update_generate_ppsd_call() + update_process_hvsr_call() + update_remove_outlier_curve_call() + update_check_peaks_call() + update_plot_string() + + observe_children(sprit_tabs, any_update) + + # Display the tab + display(sprit_widget)
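The cell assignments above rely on ipywidgets' GridspecLayout indexing, where assigning a widget to a cell (or a slice of cells) places it on the grid. A minimal, self-contained sketch of the pattern (widget labels here are illustrative):

```python
import ipywidgets as widgets
from IPython.display import display

grid = widgets.GridspecLayout(3, 4)           # 3 rows x 4 columns
grid[0, :2] = widgets.Label('Show Legend')    # label spans two columns
grid[0, 2:] = widgets.Checkbox(value=False)   # checkbox fills the rest of the row
grid[1, :] = widgets.HTML('<hr>')             # full-width divider row
grid[2, :2] = widgets.Dropdown(options=[('Frequency', 'freq'), ('Period', 'period')])
display(grid)
```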
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_modules/sprit/sprit_plot.html b/docs/_modules/sprit/sprit_plot.html new file mode 100644 index 00000000..24de8be4 --- /dev/null +++ b/docs/_modules/sprit/sprit_plot.html @@ -0,0 +1,1301 @@ + + + + + + sprit.sprit_plot — sprit 1.4 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for sprit.sprit_plot

+import datetime
+import inspect
+import os
+import pathlib
+import webbrowser
+
+from zoneinfo import available_timezones
+
+import ipywidgets as widgets
+from IPython.display import display, clear_output
+import numpy as np
+import pandas as pd
+import plotly.express as px
+import plotly.graph_objs as go
+import plotly.subplots as subplots
+from scipy import signal
+
+try:
+    import sprit_hvsr
+except ImportError:
+    import sprit.sprit_hvsr as sprit_hvsr
+
+
+[docs] +def read_data(button): + progress_bar.value = 0 + log_textArea.value += f"\n\nREADING DATA [{datetime.datetime.now()}]" + + ip_kwargs = get_input_params() + hvsr_data = sprit_hvsr.input_params(**ip_kwargs, verbose=verbose_check.value) + log_textArea.value += f"\n\n{datetime.datetime.now()}\ninput_params():\n'{ip_kwargs}" + if button.description=='Read Data': + progress_bar.value=0.333 + else: + progress_bar.value=0.1 + fd_kwargs = get_fetch_data_params() + hvsr_data = sprit_hvsr.fetch_data(hvsr_data, **fd_kwargs, verbose=verbose_check.value) + log_textArea.value += '\n\n'+str(datetime.datetime.now())+'\nfetch_data():\n\t'+str(fd_kwargs) + if button.description=='Read Data': + progress_bar.value=0.666 + else: + progress_bar.value=0.2 + + use_hv_curve_rmse.value=False + use_hv_curve_rmse.disabled=True + + update_preview_fig(hvsr_data, preview_fig) + + if button.description=='Read Data': + sprit_tabs.selected_index = 1 + progress_bar.value=0 + return hvsr_data
+ + +
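For reference, the two calls that `read_data()` wraps can also be used directly. A hedged sketch (the file path is hypothetical, and `source` mirrors the `data_source_type` widget this module maps to `fetch_data`):

```python
# Minimal read step outside the widget UI; 'data/site1.mseed' is a
# hypothetical path used only for illustration.
import sprit.sprit_hvsr as sprit_hvsr

hvsr_data = sprit_hvsr.input_params(input_data='data/site1.mseed', verbose=True)
hvsr_data = sprit_hvsr.fetch_data(hvsr_data, source='file', verbose=True)
```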
+[docs] +def get_remove_noise_kwargs(): + def get_remove_method(): + remove_method_list=[] + do_stalta = stalta_check.value + do_sat_pct = max_saturation_check.value + do_noiseWin=noisy_windows_check.value + do_warmcool=warmcool_check.value + + if auto_remove_check.value: + remove_method_list=['stalta', 'saturation', 'noise', 'warmcool'] + else: + if do_stalta: + remove_method_list.append('stalta') + if do_sat_pct: + remove_method_list.append('saturation') + if do_noiseWin: + remove_method_list.append('noise') + if do_warmcool: + remove_method_list.append('warmcool') + + if not remove_method_list: + remove_method_list = None + return remove_method_list + + remove_noise_kwargs = {'remove_method':get_remove_method(), + 'sat_percent':max_saturation_pct.value, + 'noise_percent':max_window_pct.value, + 'sta':sta.value, + 'lta':lta.value, + 'stalta_thresh':[stalta_thresh_low.value, stalta_thresh_hi.value], + 'warmup_time':warmup_time.value, + 'cooldown_time':cooldown_time.value, + 'min_win_size':noisy_window_length.value, + 'remove_raw_noise':raw_data_remove_check.value, + 'verbose':verbose_check.value} + return remove_noise_kwargs
+ + +
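The dict built above maps widget state onto `remove_noise` parameters; continuing the read sketch, a direct call looks like this (threshold values are illustrative, not package defaults):

```python
# Illustrative remove_noise call with a kwargs dict shaped like the
# one get_remove_noise_kwargs() assembles above.
rn_kwargs = {'remove_method': ['stalta', 'saturation', 'noise', 'warmcool'],
             'sta': 2, 'lta': 30,            # STA/LTA window lengths [s]
             'stalta_thresh': [8, 16],       # trigger on/off thresholds
             'warmup_time': 0,               # s trimmed from record start
             'cooldown_time': 0,             # s trimmed from record end
             'verbose': False}
hvsr_data = sprit_hvsr.remove_noise(hvsr_data, **rn_kwargs)
```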
+[docs] +def get_generate_ppsd_kwargs(): + ppsd_kwargs = { + 'skip_on_gaps':skip_on_gaps.value, + 'db_bins':[db_bins_min.value, db_bins_max.value, db_bins_step.value], + 'ppsd_length':ppsd_length.value, + 'overlap':overlap_pct.value, + 'special_handling':special_handling_dropdown.value, + 'period_smoothing_width_octaves':period_smoothing_width.value, + 'period_step_octaves':period_step_octave.value, + 'period_limits':[period_limits_min.value, period_limits_max.value], + 'verbose':verbose_check.value + } + + if str(ppsd_kwargs['special_handling']).lower() == 'none': + ppsd_kwargs['special_handling'] = None + return ppsd_kwargs
+ + +
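A sketch of the PPSD step these kwargs feed, using the same values this UI applies elsewhere (the Streamlit module later in this patch sets 30 s windows, gap skipping, 1/32-octave period steps, and period limits derived from the HVSR band):

```python
# generate_ppsds sketch; period_limits are periods in seconds, so the
# band endpoints are inverted.
hvsr_band = [0.4, 40]                                # Hz
gppsd_kwargs = {'ppsd_length': 30,
                'skip_on_gaps': True,
                'period_step_octaves': 0.03125,
                'period_limits': [1/hvsr_band[1], 1/hvsr_band[0]],
                'verbose': False}
hvsr_data = sprit_hvsr.generate_ppsds(hvsr_data, **gppsd_kwargs)
```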
+[docs] +def get_remove_outlier_curve_kwargs(): + roc_kwargs = { + 'use_percentile':rmse_pctile_check.value, + 'rmse_thresh':rmse_thresh.value, + 'use_hv_curve':False, + 'verbose':verbose_check.value + } + return roc_kwargs
+ + +
+[docs] +def get_process_hvsr_kwargs(): + if smooth_hv_curve_bool.value: + smooth_value = smooth_hv_curve.value + else: + smooth_value = smooth_hv_curve_bool.value + + if resample_hv_curve_bool.value: + resample_value = resample_hv_curve.value + else: + resample_value = resample_hv_curve_bool.value + + ph_kwargs={'method':h_combine_meth.value, + 'smooth':smooth_value, + 'freq_smooth':freq_smoothing.value, + 'f_smooth_width':freq_smooth_width.value, + 'resample':resample_value, + 'outlier_curve_rmse_percentile':use_hv_curve_rmse.value, + 'verbose':verbose_check.value} + return ph_kwargs
+ + +
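As the toggle logic above encodes, `smooth` and `resample` are either `False` (step disabled) or a point count. A hedged sketch of the corresponding call (the specific counts and smoothing name are illustrative):

```python
# process_hvsr sketch; values shown are examples, not asserted defaults.
ph_kwargs = {'smooth': 51,                      # smooth H/V curve over ~51 points
             'resample': 1000,                  # resample curve to 1000 points
             'freq_smooth': 'konno ohmachi',    # frequency-domain smoothing (illustrative)
             'f_smooth_width': 40,
             'verbose': False}
hvsr_data = sprit_hvsr.process_hvsr(hvsr_data, **ph_kwargs)
```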
+[docs] +def get_check_peaks_kwargs(): + cp_kwargs = {'hvsr_band':[hvsr_band_min_box.value, hvsr_band_max_box.value], + 'peak_freq_range':[peak_freq_range_min_box.value, peak_freq_range_max_box.value], + 'peak_selection':peak_selection_type.value, + 'verbose':verbose_check.value} + return cp_kwargs
+ + +
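A sketch of the peak-check step; both ranges mirror the 0.4-40 Hz band used throughout this file:

```python
# check_peaks sketch with the band and peak-frequency range the UI
# boxes above populate.
cp_kwargs = {'hvsr_band': [0.4, 40],
             'peak_freq_range': [0.4, 40],
             'verbose': False}
hvsr_data = sprit_hvsr.check_peaks(hvsr_data, **cp_kwargs)
```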
+[docs] +def get_get_report_kwargs(): + def get_formatted_plot_str(): + # Initialize plot string + hvsr_plot_str = '' + comp_plot_str = '' + spec_plot_str = '' + + # Whether to use each plot + if use_plot_hv.value: + hvsr_plot_str=hvsr_plot_str + "HVSR" + if use_plot_comp.value: + comp_plot_str=comp_plot_str + "C" + if use_plot_spec.value: + spec_plot_str=spec_plot_str + "SPEC" + + # Whether components be on the same plot as HV curve? + if not combine_hv_comp.value: + comp_plot_str=comp_plot_str + "+" + else: + comp_plot_str=comp_plot_str.replace('+','') + + # Whether to show (log) standard deviations + if not show_std_hv.value: + hvsr_plot_str=hvsr_plot_str + " -s" + if not show_std_comp.value: + comp_plot_str=comp_plot_str + " -s" + + # Whether to show all peaks + if show_all_peaks_hv.value: + hvsr_plot_str=hvsr_plot_str + " all" + + # Whether curves from each time window are shown + if show_all_curves_hv.value: + hvsr_plot_str=hvsr_plot_str + " t" + if show_all_curves_comp.value: + comp_plot_str=comp_plot_str + " t" + + # Whether the best peak is displayed + if show_best_peak_hv.value: + hvsr_plot_str=hvsr_plot_str + " p" + if show_best_peak_comp.value: + comp_plot_str=comp_plot_str + " p" + if show_best_peak_spec.value: + spec_plot_str=spec_plot_str + " p" + + # Whether best peak value is annotated + if ann_best_peak_hv.value: + hvsr_plot_str=hvsr_plot_str + " ann" + if ann_best_peak_comp.value: + comp_plot_str=comp_plot_str + " ann" + if ann_best_peak_spec.value: + spec_plot_str=spec_plot_str + " ann" + + # Whether peaks from individual time windows are shown + if show_ind_peaks_hv.value: + hvsr_plot_str=hvsr_plot_str + " tp" + if show_ind_peaks_spec.value: + spec_plot_str=spec_plot_str + ' tp' + + # Whether to show legend + if show_legend_hv.value: + hvsr_plot_str=hvsr_plot_str + " leg" + if ann_best_peak_comp.value: + comp_plot_str=comp_plot_str + " leg" + if show_legend_spec.value: + spec_plot_str=spec_plot_str + " leg" + + # Combine string into one + plot_str = hvsr_plot_str + ' ' + comp_plot_str+ ' ' + spec_plot_str + return plot_str + + gr_kwargs = {'report_format':['print','csv'], + 'plot_type':get_formatted_plot_str(), + 'export_path':None, + 'csv_overwrite_opt':'overwrite', + 'no_output':False, + 'verbose':verbose_check.value + } + return gr_kwargs
+ + +
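The plot string assembled by `get_formatted_plot_str()` is a small mini-language. A hedged summary of the tokens this module emits, and an example report call:

```python
# Tokens emitted above: 'HVSR'/'C'/'SPEC' choose subplots ('C+' puts
# components on their own subplot); per-subplot flags: 'p' best peak,
# 'ann' annotate it, 't' per-window curves, 'tp' per-window peaks,
# 'all' all checked peaks, '-s' hide st. dev., 'leg' legend.
plot_str = 'HVSR p ann C+ p SPEC p ann'
hvsr_data = sprit_hvsr.get_report(hvsr_data,
                                  report_format=['print', 'csv'],
                                  plot_type=plot_str,
                                  csv_overwrite_opt='overwrite')
```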
+[docs] +def process_data(button): + startProc=datetime.datetime.now() + progress_bar.value = 0 + log_textArea.value += f"\n\nPROCESSING DATA [{startProc}]" + global hvsr_data + # Read data again only if internal hvsr_data input_data variable is different from what is in the gui + if not 'hvsr_data' in globals() or not hasattr(hvsr_data, 'input_data') or \ + (pathlib.Path(hvsr_data.input_data).as_posix() != pathlib.Path(data_filepath.value).as_posix()): + hvsr_data = read_data(button) + + remove_noise_kwargs = get_remove_noise_kwargs() + hvsr_data = sprit_hvsr.remove_noise(hvsr_data, **remove_noise_kwargs) + log_textArea.value += f"\n\n{datetime.datetime.now()}\nremove_noise()\n\t{remove_noise_kwargs}" + progress_bar.value = 0.3 + + generate_ppsd_kwargs = get_generate_ppsd_kwargs() + hvsr_data = sprit_hvsr.generate_ppsds(hvsr_data, **generate_ppsd_kwargs) + progress_bar.value = 0.5 + log_textArea.value += f"\n\n{datetime.datetime.now()}\ngenerate_ppsds()\n\t{generate_ppsd_kwargs}" + + + # If this was started by clicking "Generate PPSDs", stop here + if button.description == 'Generate PPSDs': + return + + ph_kwargs = get_process_hvsr_kwargs() + hvsr_data = sprit_hvsr.process_hvsr(hvsr_data, **ph_kwargs) + log_textArea.value += f"\n\n{datetime.datetime.now()}\nprocess_hvsr()\n\t{ph_kwargs}" + progress_bar.value = 0.75 + update_outlier_fig() + + roc_kwargs = get_remove_outlier_curve_kwargs() + hvsr_data = sprit_hvsr.remove_outlier_curves(hvsr_data, **roc_kwargs) + log_textArea.value += f"\n\n{datetime.datetime.now()}\nremove_outlier_curves()\n\t{roc_kwargs}" + progress_bar.value = 0.85 + outlier_fig, hvsr_data = update_outlier_fig() + + use_hv_curve_rmse.value=False + use_hv_curve_rmse.disabled=False + + def get_rmse_range(): + minRMSE = 10000 + maxRMSE = -1 + if roc_kwargs['use_hv_curve']: + colnames = ['HV_Curves'] + else: + colnames = ['psd_values_Z', + 'psd_values_E', + 'psd_values_N'] + dataList = [] + for col in colnames: + dataArr = np.stack(hvsr_data.hvsr_windows_df[col]) + medCurveArr = np.nanmedian(dataArr, axis=0) + rmse = np.sqrt(((np.subtract(dataArr, medCurveArr)**2).sum(axis=1))/dataArr.shape[1]) + if rmse.min() < minRMSE: + minRMSE = rmse.min() + if rmse.max() > maxRMSE: + maxRMSE = rmse.max() + rmse_thresh_slider.min = minRMSE + rmse_thresh_slider.max = maxRMSE + rmse_thresh_slider.step = round((maxRMSE-minRMSE)/100, 2) + rmse_thresh_slider.value = maxRMSE + get_rmse_range() + + cp_kwargs = get_check_peaks_kwargs() + hvsr_data = sprit_hvsr.check_peaks(hvsr_data, **cp_kwargs) + log_textArea.value += f"\n\n{datetime.datetime.now()}\ncheck_peaks()\n\t{cp_kwargs}" + progress_bar.value = 0.9 + + gr_kwargs = get_get_report_kwargs() + hvsr_data = sprit_hvsr.get_report(hvsr_data, **gr_kwargs) + log_textArea.value += f"\n\n{datetime.datetime.now()}\nget_report()\n\t{gr_kwargs}\n\n" + hvsr_data.get_report(report_format='print') # Just in case print wasn't included + log_textArea.value += hvsr_data['Print_Report'] + printed_results_textArea.value = hvsr_data['Print_Report'] + hvsr_data.get_report(report_format='csv') + results_table.value = hvsr_data['CSV_Report'].to_html() + + log_textArea.value += f'Processing time: {datetime.datetime.now() - startProc}' + progress_bar.value = 0.95 + + update_results_fig(hvsr_data, gr_kwargs['plot_type']) + + progress_bar.value = 1 + global hvsr_results + hvsr_results = hvsr_data + return hvsr_results
+ + +
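Stripped of progress-bar and logging state, `process_data()` drives the workflow in this order (the `get_*_kwargs` helpers read live widget values, so this sequence only runs inside the UI; it is shown to make the call order explicit):

```python
# The processing chain process_data() executes, step by step.
hvsr_data = sprit_hvsr.remove_noise(hvsr_data, **get_remove_noise_kwargs())
hvsr_data = sprit_hvsr.generate_ppsds(hvsr_data, **get_generate_ppsd_kwargs())
hvsr_data = sprit_hvsr.process_hvsr(hvsr_data, **get_process_hvsr_kwargs())
hvsr_data = sprit_hvsr.remove_outlier_curves(hvsr_data, **get_remove_outlier_curve_kwargs())
hvsr_data = sprit_hvsr.check_peaks(hvsr_data, **get_check_peaks_kwargs())
hvsr_data = sprit_hvsr.get_report(hvsr_data, **get_get_report_kwargs())
```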
+[docs] +def parse_plot_string(plot_string): + plot_list = plot_string.split() + + hvsrList = ['hvsr', 'hv', 'h'] + compList = ['component', 'comp', 'c'] + compPlus = [item + '+' for item in compList] + specList = ['spectrogram', 'specgram', 'spec','sg', 's'] + + hvInd = np.nan + compInd = np.nan + specInd = np.nan + + hvIndFound = False + compIndFound = False + specIndFound = False + + for i, item in enumerate(plot_list): + if item.lower() in hvsrList and not hvIndFound: + # assign the index + hvInd = i + hvIndFound = True + if (item.lower() in compList or item.lower() in compPlus) and not compIndFound: + # assign the index + compInd = i + compIndFound = True + if item.lower() in specList and not specIndFound: + # assign the index + specInd = i + specIndFound = True + + # Get individual plot lists (should already be correctly ordered) + if hvInd is np.nan: + hvsr_plot_list = ['HVSR'] + + if compInd is np.nan: + comp_plot_list = [] + if specInd is np.nan: + if hvInd is not np.nan: + hvsr_plot_list = plot_list + spec_plot_list = [] + else: + if hvInd is not np.nan: + hvsr_plot_list = plot_list[hvInd:specInd] + spec_plot_list = plot_list[specInd:] + else: + if hvInd is not np.nan: + hvsr_plot_list = plot_list[hvInd:compInd] + + if specInd is np.nan: + comp_plot_list = plot_list[compInd:] + spec_plot_list = [] + else: + comp_plot_list = plot_list[compInd:specInd] + spec_plot_list = plot_list[specInd:] + + # Figure out how many subplots there will be + plot_list_list = [hvsr_plot_list, comp_plot_list, spec_plot_list] + + return plot_list_list
+ + +
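A worked example of the split performed above, using the default plot string from `plot_results()`:

```python
# parse_plot_string returns the three per-subplot option lists consumed
# by the parse_*_plot_list helpers below.
plot_list = parse_plot_string('HVSR p ann C+ p SPEC ann')
# -> [['HVSR', 'p', 'ann'], ['C+', 'p'], ['SPEC', 'ann']]
```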
+[docs] +def parse_hv_plot_list(hv_data, hvsr_plot_list, results_fig=None, azimuth='HV'): + hvsr_data = hv_data + hv_plot_list = hvsr_plot_list[0] + x_data = hvsr_data.x_freqs['Z'] + hvsrDF = hvsr_data.hvsr_windows_df + + plotymax = max(hvsr_data.hvsrp2['HV']) + (max(hvsr_data.hvsrp2['HV']) - max(hvsr_data.hvsr_curve)) + ylim = [0, plotymax] + + + if results_fig is None: + results_fig = go.Figure() + + if azimuth == 'HV': + HVCol = 'HV_Curves' + else: + HVCol = 'HV_Curves_'+azimuth + + if 'tp' in hv_plot_list: + allpeaks = [] + for row in hvsrDF[hvsrDF['Use']]['CurvesPeakFreqs_'+azimuth].values: + for peak in row: + allpeaks.append(peak) + allInd = [] + for row, peakList in enumerate(hvsrDF[hvsrDF['Use']]['CurvesPeakIndices_'+azimuth].values): + for ind in peakList: + allInd.append((row, ind)) + x_vals = [] + y_vals = [] + y_max = np.nanmax(hvsr_data.hvsrp[azimuth]) + hvCurveInd = list(hvsrDF.columns).index(HVCol) + + for i, tp in enumerate(allpeaks): + x_vals.extend([tp, tp, None]) # add two x values and a None + y_vals.extend([0, hvsrDF.iloc[allInd[i][0], hvCurveInd][allInd[i][1]], None]) # add the first and last y values and a None + + results_fig.add_trace(go.Scatter(x=x_vals, y=y_vals, mode='lines', + line=dict(width=4, dash="solid", + color="rgba(128,0,0,0.1)"), + name='Best Peaks Over Time'), + row=1, col=1) + + if 't' in hv_plot_list: + alltimecurves = np.stack(hvsrDF[hvsrDF['Use']][HVCol]) + for i, row in enumerate(alltimecurves): + if i==0: + showLeg = True + else: + showLeg= False + results_fig.add_trace(go.Scatter(x=x_data[:-1], y=row, mode='lines', + line=dict(width=0.5, dash="solid", + color="rgba(100, 110, 100, 0.8)"), + showlegend=showLeg, + name='Ind. time win. curve', + hoverinfo='none'), + row=1, col=1) + + if 'all' in hv_plot_list: + for i, p in enumerate(hvsr_data['hvsr_peak_freqs'][azimuth]): + if i==0: + showLeg = True + else: + showLeg= False + + results_fig.add_trace(go.Scatter( + x=[p, p, None], # set x to None + y=[0, np.nanmax(np.stack(hvsrDF[HVCol])),None], # set y to None + mode="lines", # set mode to lines + line=dict(width=1, dash="dot", color="gray"), # set line properties + name="All checked peaks", # set legend name + showlegend=showLeg), + row=1, col=1) + + if 'fr' in hv_plot_list: + lowX = [hvsr_data.hvsr_band[0], hvsr_data.hvsr_band[0]] + lowWinX = [hvsr_data.peak_freq_range[0], hvsr_data.peak_freq_range[0]] + hiWinX = [hvsr_data.peak_freq_range[1], hvsr_data.peak_freq_range[1]] + hiX = [hvsr_data.hvsr_band[1], hvsr_data.hvsr_band[1]] + + yPts = ylim + + # Show windows where peak_freq_range is + results_fig.add_trace(go.Scatter(x=lowWinX, y=yPts, + line={'color':'black', 'width':0.1}, marker=None, + fill='tozerox', fillcolor="rgba(128, 100, 100, 0.6)", + showlegend=False, name='Peak Frequency exclusion range', + hoverinfo='none'), + row=1, col=1) + + results_fig.add_trace(go.Scatter(x=hiWinX, y=yPts, + line={'color':'black', 'width':0.1},marker=None, + showlegend=False, + hoverinfo='none'), + row=1, col=1) + + results_fig.add_trace(go.Scatter(x=hiX, y=yPts, + line=None, marker=None, + fill='tonextx', fillcolor="rgba(128, 100, 100, 0.6)", + name='Peak frequency exclusion range', hoverinfo='none'), + row=1, col=1) + + if '-s' not in hv_plot_list: + # Show standard deviation + results_fig.add_trace(go.Scatter(x=x_data, y=hvsr_data.hvsrp2[azimuth], + line={'color':'black', 'width':0.1},marker=None, + showlegend=False, name='Log. St.Dev. 
Upper', + hoverinfo='none'), + row=1, col=1) + + results_fig.add_trace(go.Scatter(x=x_data, y=hvsr_data.hvsrm2[azimuth], + line={'color':'black', 'width':0.1},marker=None, + fill='tonexty', fillcolor="rgba(128, 128, 128, 0.6)", + name='Log. St.Dev.', hoverinfo='none'), + row=1, col=1) + + if 'p' in hv_plot_list: + results_fig.add_trace(go.Scatter( + x=[hvsr_data['BestPeak'][azimuth]['f0'], hvsr_data['BestPeak'][azimuth]['f0'], None], # set x to None + y=[0,np.nanmax(np.stack(hvsrDF['HV_Curves'])),None], # set y to None + mode="lines", # set mode to lines + line=dict(width=1, dash="dash", color="black"), # set line properties + name="Best Peak"), + row=1, col=1) + + if 'ann' in hv_plot_list: + # Annotate best peak + results_fig.add_annotation(x=np.log10(hvsr_data['BestPeak'][azimuth]['f0']), + y=0, yanchor='bottom', xanchor='center', + text=f"{hvsr_data['BestPeak'][azimuth]['f0']:.3f} Hz", + bgcolor='rgba(255, 255, 255, 0.7)', + showarrow=False, + row=1, col=1) + return results_fig
+ + +
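The `[x, x, None]` pattern used above is a Plotly idiom: a `None` coordinate breaks a line trace, so many vertical peak markers can be drawn as one `Scatter` trace instead of one trace per peak. A standalone illustration with made-up values:

```python
import plotly.graph_objs as go

peaks, y_max = [1.2, 3.4, 7.9], 5.0        # illustrative peak freqs and height
x_vals, y_vals = [], []
for p in peaks:
    x_vals.extend([p, p, None])            # one vertical segment per peak
    y_vals.extend([0, y_max, None])        # None ends the segment
fig = go.Figure(go.Scatter(x=x_vals, y=y_vals, mode='lines',
                           line=dict(width=1, dash='dot', color='gray')))
fig.show()
```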
+[docs] +def parse_comp_plot_list(hv_data, comp_plot_list, plot_with_hv=False, results_fig=None, azimuth='HV'): + + hvsr_data = hv_data + if results_fig is None: + results_fig=go.Figure() + + # Initial setup + x_data = hvsr_data.x_freqs['Z'] + hvsrDF = hvsr_data.hvsr_windows_df + + same_plot = False + if plot_with_hv: + same_plot = True + else: + same_plot = ((comp_plot_list != []) and ('+' not in comp_plot_list[0])) + + if same_plot: + yaxis_to_use = 'y2' + use_secondary = True + transparency_modifier = 0.5 + else: + yaxis_to_use = 'y' + use_secondary=False + transparency_modifier = 1 + + # Keep components if azimuth is used, but make them lighter + if len(hvsr_data.hvsr_az.keys()) > 0: + h_transparency_modifier = transparency_modifier * 0.5 + else: + h_transparency_modifier = transparency_modifier + + v_transparency_modifier = transparency_modifier + az_transparency_modifier = transparency_modifier + + + h_alpha = 0.4 * h_transparency_modifier + v_alpha = 0.4 * v_transparency_modifier + az_alpha = 0.4 * az_transparency_modifier + components = ['Z', 'E', 'N'] + + compColor_semi_light = {'Z':f'rgba(128,128,128,{v_alpha})', + 'E':f'rgba(0,0,128,{h_alpha})', + 'N':f'rgba(128,0,0,{h_alpha})'} + + h_alpha = 0.7 * h_transparency_modifier + v_alpha = 0.7 * v_transparency_modifier + az_alpha = 0.7 * az_transparency_modifier + compColor_semi = {'Z':f'rgba(128,128,128,{v_alpha})', + 'E':f'rgba(100,100,128,{h_alpha})', + 'N':f'rgba(128,100,100,{h_alpha})'} + + compColor = {'Z':f'rgba(128,128,128,{v_alpha})', + 'E':f'rgba(100,100,250,{h_alpha})', + 'N':f'rgba(250,100,100,{h_alpha})'} + + for az in hvsr_data.hvsr_az.keys(): + components.append(az) + compColor_semi_light[az] = f'rgba(0,128,0,{az_alpha})' + compColor_semi[az] = f'rgba(100,128,100,{az_alpha})' + compColor[az] = f'rgba(100,250,100,{az_alpha})' + + # Whether to plot in new subplot or not + if same_plot: + compRow=1 + else: + compRow=2 + + # Whether to plot individual time curves + if 't' in comp_plot_list: + for comp in components: + alltimecurves = np.stack(hvsrDF[hvsrDF['Use']]['psd_values_'+comp]) + for i, row in enumerate(alltimecurves): + if i==0: + showLeg = True + else: + showLeg= False + + results_fig.add_trace(go.Scatter(x=x_data[:-1], y=row, mode='lines', + line=dict(width=0.5, dash="solid", + color=compColor_semi[comp]), + name='Ind. time win. curve', + showlegend=False, + hoverinfo='none', + yaxis=yaxis_to_use), + secondary_y=use_secondary, + row=compRow, col=1) + + # Code to plot standard deviation windows, if not removed + if '-s' not in comp_plot_list: + for comp in components: + # Show standard deviation + results_fig.add_trace(go.Scatter(x=x_data, y=hvsr_data.ppsd_std_vals_p[comp], + line={'color':compColor_semi_light[comp], 'width':0.1},marker=None, + showlegend=False, name='Log. St.Dev. Upper', + hoverinfo='none', + yaxis=yaxis_to_use), + secondary_y=use_secondary, + row=compRow, col=1) + + results_fig.add_trace(go.Scatter(x=x_data, y=hvsr_data.ppsd_std_vals_m[comp], + line={'color':compColor_semi_light[comp], 'width':0.1},marker=None, + fill='tonexty', fillcolor=compColor_semi_light[comp], + name=f'St.Dev. 
[{comp}]', hoverinfo='none', showlegend=False, + yaxis=yaxis_to_use), + secondary_y=use_secondary, + row=compRow, col=1) + + # Code to plot location of best peak + if 'p' in comp_plot_list: + minVal = 10000 + maxVal = -10000 + for comp in components: + currPPSDCurve = hvsr_data['psd_values_tavg'][comp] + if np.nanmin(currPPSDCurve) < minVal: + minVal = np.nanmin(currPPSDCurve) + if np.nanmax(currPPSDCurve) > maxVal: + maxVal = np.nanmax(currPPSDCurve) + + results_fig.add_trace(go.Scatter( + x=[hvsr_data['BestPeak'][azimuth]['f0'], hvsr_data['BestPeak'][azimuth]['f0'], None], # set x to None + y=[minVal,maxVal,None], # set y to None + mode="lines", # set mode to lines + line=dict(width=1, dash="dash", color="black"), # set line properties + name="Best Peak", + yaxis=yaxis_to_use), + secondary_y=use_secondary, + row=compRow, col=1) + + # Code to annotate value of best peak + if 'ann' in comp_plot_list: + minVal = 1e6 # A high number to compare against (comparer should always be lower) + for comp in components: + currPPSDCurve = hvsr_data['ppsd_std_vals_m'][comp] + if np.nanmin(currPPSDCurve) < minVal: + minVal = np.nanmin(currPPSDCurve) + results_fig.add_annotation(x=np.log10(hvsr_data['BestPeak'][azimuth]['f0']), + y=minVal, + text=f"{hvsr_data['BestPeak'][azimuth]['f0']:.3f} Hz", + bgcolor='rgba(255, 255, 255, 0.7)', + showarrow=False, + yref=yaxis_to_use, + secondary_y=use_secondary, + row=compRow, col=1) + + # Plot the main averaged component PPSDs + for comp in components: + results_fig.add_trace(go.Scatter(x=hvsr_data.x_freqs[comp], + y=hvsr_data['psd_values_tavg'][comp], + line=dict(width=2, dash="solid", + color=compColor[comp]),marker=None, + name='PPSD Curve '+comp, + yaxis=yaxis_to_use), + secondary_y=use_secondary, + row=compRow, col='all') + + # If new subplot, update accordingly + if compRow==2: + results_fig.update_xaxes(type='log', + range=[np.log10(hvsr_data['hvsr_band'][0]), np.log10(hvsr_data['hvsr_band'][1])], + row=compRow, col=1) + return results_fig
+ + +
+[docs] +def parse_spec_plot_list(hv_data, spec_plot_list, subplot_num, results_fig=None, azimuth='HV'): + hvsr_data = hv_data + + if results_fig is None: + results_fig=go.Figure() + + if azimuth == 'HV': + HVCol = 'HV_Curves' + else: + HVCol = 'HV_Curves_'+azimuth + + # Initial setup + hvsrDF = hvsr_data.hvsr_windows_df + specAxisTimes = np.array([dt.isoformat() for dt in hvsrDF.index.to_pydatetime()]) + y_data = hvsr_data.x_freqs['Z'][1:] + image_data = np.stack(hvsrDF[HVCol]).T + + maxZ = np.percentile(image_data, 100) + minZ = np.percentile(image_data, 0) + + use_mask = hvsr_data.hvsr_windows_df.Use.values + use_mask = np.tile(use_mask, (image_data.shape[0],1)) + use_mask = np.where(use_mask is False, np.nan, use_mask) + + hmap = go.Heatmap(z=image_data, + x=specAxisTimes, + y=y_data, + colorscale='Turbo', + showlegend=False, + #opacity=0.7, + hovertemplate='Time [UTC]: %{x}<br>Frequency [Hz]: %{y:.2f}<br>H/V Amplitude: %{z:.2f}<extra></extra>', + zmin=minZ,zmax=maxZ, showscale=False, name='HV Curve Amp. over Time') + results_fig.add_trace(hmap, row=subplot_num, col=1) + + data_used = go.Heatmap( + x=specAxisTimes, + y=y_data, + z=use_mask.astype(bool), + showlegend=False, + colorscale=[[0, 'rgba(0,0,0,0.66)'], [0.25, 'rgba(0,0,0,0.66)'], [1, 'rgba(250,250,250,0)']], + showscale=False, name='Used') + results_fig.add_trace(data_used, row=subplot_num, col=1) + + + # tp currently is not being added to spec_plot_list + if 'tp' in spec_plot_list: + yvals = [] + for row in hvsrDF[HVCol].values: + maxInd = np.argmax(row) + yvals.append(y_data[maxInd]) + tp_trace = go.Scatter(x=specAxisTimes, y=yvals, mode='markers', + line=None, marker=dict(color='white', size=2, line=dict(color='black', width=0.1)), name='Individual H/V Peaks') + results_fig.add_trace(tp_trace, row=subplot_num, col='all') + + if 'p' in spec_plot_list: + results_fig.add_hline(y=hvsr_data['BestPeak'][azimuth]['f0'], line_width=1, line_dash='dash', line_color='black', row=subplot_num, col='all') + + if 'ann' in spec_plot_list: + results_fig.add_annotation(x=specAxisTimes[-1], + y=hvsr_data['hvsr_band'][1], + text=f"Peak: {hvsr_data['BestPeak'][azimuth]['f0']:.3f} Hz", + bgcolor='rgba(255, 255, 255, 0.7)', + showarrow=False, xanchor='right', yanchor='top', + row=subplot_num, col='all') + + if 'leg' in spec_plot_list: + pass + + results_fig.update_yaxes(type='log', + range=[np.log10(hvsr_data['hvsr_band'][0]), np.log10(hvsr_data['hvsr_band'][1])], + row=subplot_num, col=1) + + results_fig.add_annotation( + text=f"{hvsrDF['Use'].astype(bool).sum()}/{hvsrDF.shape[0]} windows used", + x=max(specAxisTimes), + y=np.log10(min(y_data))+(np.log10(max(y_data))-np.log10(min(y_data)))*0.01, + xanchor="right", yanchor="bottom",bgcolor='rgba(256,256,256,0.7)', + showarrow=False,row=subplot_num, col=1) + + return results_fig
+ + +
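The 'Used' overlay above is a second heatmap whose colorscale maps unused windows to semi-opaque black and used windows to fully transparent, drawn on top of the H/V image. A self-contained sketch with stand-in data:

```python
import numpy as np
import plotly.graph_objs as go

z = np.random.rand(10, 20)                           # stand-in H/V image
used = np.ones(20, dtype=bool)
used[5:8] = False                                    # three masked time windows
mask = np.tile(used, (z.shape[0], 1)).astype(float)  # repeat mask down the rows
fig = go.Figure(go.Heatmap(z=z, colorscale='Turbo', showscale=False))
fig.add_trace(go.Heatmap(z=mask, showscale=False,
                         colorscale=[[0, 'rgba(0,0,0,0.66)'],
                                     [1, 'rgba(250,250,250,0)']]))
fig.show()
```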
+[docs] +def plot_results(hv_data, plot_string='HVSR p ann C+ p SPEC ann', results_fig=None, results_graph_widget=None, return_fig=False, show_results_plot=True): + if results_fig is None: + results_fig = go.FigureWidget() + + hvsr_data = hv_data + + xlim = [hvsr_data.hvsr_band[0], hvsr_data.hvsr_band[1]] + plotymax = max(hvsr_data.hvsrp2['HV']) + (max(hvsr_data.hvsrp2['HV']) - max(hvsr_data.hvsr_curve)) + ylim = [0, plotymax] + + if isinstance(hvsr_data, sprit_hvsr.HVSRBatch): + hvsr_data=hvsr_data[0] + + hvsrDF = hvsr_data.hvsr_windows_df + + plot_list = parse_plot_string(plot_string) + + combinedComp=False + # By default there 3 subplots + noSubplots = 3 + # Remove any subplots that are not indicated by plot_type parameter + noSubplots = noSubplots - plot_list.count([]) + + # Now, check if comp plot is combined with HV + if plot_list[1] != [] and '+' not in plot_list[1][0]: + combinedComp = True + noSubplots -= 1 + + # Get all data for each plotted item + # Get subplot numbers based on plot_list + spec=[] + if plot_list[0]==[]: + # if for some reason, HVSR plot was not indicated, add it + hv_plot_row = 1 # Default first row to hv (may change later) + noSubplots += 1 + if plot_list[1] == []: + comp_plot_row = None + if plot_list[2] == []: + spec_plot_row = None + hv_plot_row = 1 #If nothing specified, do basic h/v plot + else: + spec_plot_row = 1 # If only spec specified + else: + comp_plot_row = 1 # If no HV specified by comp is, comp is subplot 1 + + if plot_list[2] == []: + spec_plot_row = None + else: + spec_plot_row = 2 # If only comp and spec specified comp first then spec + else: + hv_plot_row = 1 # HV specified explicitly + if plot_list[1] == []: + comp_plot_row = None + if plot_list[2] == []: + spec_plot_row = None + else: + spec_plot_row = 2 # if no comp specified, spec is 2nd subplot + else: + if combinedComp: + comp_plot_row = 1 + if plot_list[2] == []: + spec_plot_row = None + else: + spec_plot_row = 2 + else: + comp_plot_row = 2 + if plot_list[2] == []: + spec_plot_row = None + else: + spec_plot_row = 3 + + specList=[] + rHeights=[1] + if hv_plot_row == 1: + if comp_plot_row == 1: + specList.append([{'secondary_y': True}]) + if spec_plot_row == 2: + specList.append([{'secondary_y': False}]) + else: + specList.append([{'secondary_y': False}]) + + if noSubplots >= 2: + specList.append([{'secondary_y': False}]) + rHeights = [1.5,1] + if noSubplots == 3: + specList.append([{'secondary_y': False}]) + rHeights = [2,1.5,1] + + # Failsafes + while len(specList)<noSubplots: + specList.append([{}]) + + while len(rHeights)<noSubplots: + rHeights.append(1) + + # Re-initialize results_fig + results_fig.data = [] + results_fig.update_layout(grid=None) # Clear the existing grid layout, in case applicable + + results_subp = subplots.make_subplots(rows=noSubplots, cols=1, horizontal_spacing=0.01, vertical_spacing=0.07, + specs=specList, + row_heights=rHeights) + results_fig.update_layout(grid={'rows': noSubplots}) + + results_fig = go.FigureWidget(results_subp) + + if plot_list[1] != []: + results_fig = parse_comp_plot_list(hvsr_data, results_fig=results_fig, comp_plot_list=plot_list[1]) + results_fig.update_xaxes(title_text='Frequency [Hz]', row=comp_plot_row, col=1) + + # HVSR Plot (plot this after COMP so it is on top COMP and to prevent deletion with no C+) + results_fig = parse_hv_plot_list(hvsr_data, hvsr_plot_list=plot_list, results_fig=results_fig) + + # Will always plot the HV Curve + results_fig.add_trace(go.Scatter(x=hvsr_data.x_freqs['Z'],y=hvsr_data.hvsr_curve, + 
line={'color':'black', 'width':1.5}, marker=None, name='HVSR Curve'), + row=1, col='all') + # SPEC plot + if plot_list[2] != []: + results_fig = parse_spec_plot_list(hvsr_data, spec_plot_list=plot_list[2], subplot_num=spec_plot_row, results_fig=results_fig) + + # Final figure updating + resultsFigWidth=800 + + components_HV_on_same_plot = (plot_list[1]==[] or '+' not in plot_list[1][0]) + if components_HV_on_same_plot: + compxside = 'bottom' + secondaryY = True + showHVTickLabels = True + showComptickLabels = True + else: + compxside = 'bottom' + secondaryY = False + showHVTickLabels = True + showComptickLabels = True + + # Update H/V Plot + results_fig.update_xaxes(type='log', title_text='Frequency [Hz]', title_standoff=0, + range=[np.log10(hvsr_data['hvsr_band'][0]), np.log10(hvsr_data['hvsr_band'][1])], + side='bottom', showticklabels=showHVTickLabels, + row=1, col=1) + results_fig.update_yaxes(title_text='H/V Ratio', row=1, col=1, secondary_y=False, range=ylim) + + # Update Component plot + results_fig.update_xaxes(type='log',overlaying='x', showticklabels=showComptickLabels, title_standoff=0, + range=[np.log10(hvsr_data['hvsr_band'][0]), np.log10(hvsr_data['hvsr_band'][1])], + side=compxside, row=comp_plot_row, col=1) + results_fig.update_yaxes(title_text="PPSD Amp\n[m2/s4/Hz][dB]", secondary_y=secondaryY, row=comp_plot_row, col=1) + + # Update Spec plot + results_fig.update_yaxes(title_text='H/V Over Time', row=noSubplots, col=1) + + # Update entire figure + results_fig.update_layout(margin={"l":10, "r":10, "t":35, 'b':0}, + showlegend=False, autosize=True, width=resultsFigWidth, height=resultsFigWidth*0.8, + title=f"{hvsr_data['site']} Results") + + # Reset results_graph_widget and display + if results_graph_widget is not None: + with results_graph_widget: + clear_output(wait=True) + display(results_fig) + + if show_results_plot: + results_fig.show() + + if return_fig: + return results_fig
+ + +
+[docs] +def plot_preview(hv_data, stream=None, preview_fig=None, spectrogram_component='Z', show_plot=True, return_fig=False): + if preview_fig is None: + preview_subp = subplots.make_subplots(rows=4, cols=1, shared_xaxes=True, horizontal_spacing=0.01, vertical_spacing=0.01, row_heights=[3,1,1,1]) + #preview_fig = go.FigureWidget(preview_subp) + preview_fig = go.Figure(preview_subp) + + preview_fig.data = [] + + hvsr_data = hv_data + if isinstance(hvsr_data, sprit_hvsr.HVSRBatch): + hvsr_data=hvsr_data[0] + + if stream is not None: + # This is only used for fetch_data, which ['stream'] has not yet been defined + hvsr_data['stream'] = stream + + if isinstance(hvsr_data, (sprit_hvsr.HVSRData, dict)): + stream_z = hvsr_data['stream'].select(component='Z').merge() + stream_e = hvsr_data['stream'].select(component='E').merge() + stream_n = hvsr_data['stream'].select(component='N').merge() + hvsrBand = hvsr_data['hvsr_band'] + siteName = hvsr_data['site'] + else: + # This is in case data is an obspy stream + stream_z = hvsr_data.select(component='Z').merge() + stream_e = hvsr_data.select(component='E').merge() + stream_n = hvsr_data.select(component='N').merge() + hvsrBand = [0.4, 40] + siteName = 'HVSRSite' + + # Get iso_times and datetime.datetime + utcdt = stream_z[0].times(type='utcdatetime') + iso_times=[] + dt_times = [] + for t in utcdt: + if t is not np.ma.masked: + iso_times.append(t.isoformat()) + dt_times.append(datetime.datetime.fromisoformat(t.isoformat())) + else: + iso_times.append(np.nan) + iso_times=np.array(iso_times) + dt_times = np.array (dt_times) + + # Generate spectrogram + specKey=spectrogram_component.upper() + specStreamDict = {'Z':stream_z[0], + 'E':stream_e[0], + 'N':stream_n[0]} + f, t, Sxx = signal.spectrogram(x=specStreamDict[specKey].data, fs=specStreamDict[specKey].stats.sampling_rate, mode='magnitude') + + # Get times for the axis (one time per window) + axisTimes = [] + for tpass in t: + axisTimes.append((dt_times[0]+datetime.timedelta(seconds=tpass)).isoformat()) + + # Add data to preview_fig + # Add spectrogram of Z component + minz = np.percentile(Sxx, 1) + maxz = np.percentile(Sxx, 99) + hmap = go.Heatmap(z=Sxx, + x=axisTimes, + y=f, + colorscale='Turbo', + showlegend=False, + hovertemplate='Time [UTC]: %{x}<br>Frequency [Hz]: %{y:.2f}<br>Spectrogram Magnitude: %{z:.2f}<extra></extra>', + zmin=minz, zmax=maxz, showscale=False, name=f'{specKey} Component Spectrogram') + preview_fig.add_trace(hmap, row=1, col=1) + preview_fig.update_yaxes(type='log', range=[np.log10(hvsrBand[0]), np.log10(hvsrBand[1])], row=1, col=1) + preview_fig.update_yaxes(title={'text':f'Spectrogram ({specKey})'}, row=1, col=1) + + # Add raw traces + dec_factor=5 #This just makes the plotting go faster, by "decimating" the data + preview_fig.add_trace(go.Scatter(x=iso_times[::dec_factor], y=stream_z[0].data[::dec_factor], + line={'color':'black', 'width':0.5},marker=None, name='Z component data'), row=2, col='all') + preview_fig.update_yaxes(title={'text':'Z'}, row=2, col=1) + preview_fig.add_trace(go.Scatter(x=iso_times[::dec_factor], y=stream_e[0].data[::dec_factor], + line={'color':'blue', 'width':0.5},marker=None, name='E component data'),row=3, col='all') + preview_fig.update_yaxes(title={'text':'E'}, row=3, col=1) + preview_fig.add_trace(go.Scatter(x=iso_times[::dec_factor], y=stream_n[0].data[::dec_factor], + line={'color':'red', 'width':0.5},marker=None, name='N component data'), row=4, col='all') + preview_fig.update_yaxes(title={'text':'N'}, row=4, col=1) + + 
#preview_fig.add_trace(p) + preview_fig.update_layout(margin={"l":10, "r":10, "t":30, 'b':0}, showlegend=False, + title=f"{siteName} Data Preview") + + if show_plot: + preview_fig.show() + + if return_fig: + return preview_fig
+ + +
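A minimal version of the spectrogram call in `plot_preview()`: `scipy.signal.spectrogram` returns sample frequencies `f`, segment times `t`, and the magnitude spectrogram `Sxx` for a 1-D trace (the data here are synthetic):

```python
import numpy as np
from scipy import signal

fs = 100.0                                  # Hz, illustrative sampling rate
x = np.random.randn(int(fs * 60))           # 60 s of stand-in data
f, t, Sxx = signal.spectrogram(x=x, fs=fs, mode='magnitude')
print(Sxx.shape)                            # (len(f), len(t))
```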
+[docs] +def plot_outlier_curves(hvsr_data, plot_engine='plotly', rmse_thresh=0.98, use_percentile=True, use_hv_curve=False, from_roc=False, show_plot=True, verbose=False): + hv_data = hvsr_data + #outlier_fig = go.FigureWidget() + outlier_fig = go.Figure() + + roc_kwargs = {'rmse_thresh':rmse_thresh, + 'use_percentile':True, + 'use_hv_curve':use_hv_curve, + 'show_outlier_plot':False, + 'plot_engine':'None', + 'verbose':verbose + } + if 'PPSDStatus' in hvsr_data.ProcessingStatus.keys() and hvsr_data.ProcessingStatus['PPSDStatus']: + #log_textArea.value += f"\n\n{datetime.datetime.now()}\nremove_outlier_curves():\n'{roc_kwargs}" + #hvsr_data = sprit_hvsr.remove_outlier_curves(hvsr_data, **roc_kwargs) + pass + else: + #log_textArea.value += f"\n\n{datetime.datetime.now()}\nremove_outlier_curves() attempted, but not completed. hvsr_data.ProcessingStatus['PPSDStatus']=False\n'{roc_kwargs}" + return outlier_fig + + if roc_kwargs['use_hv_curve']: + no_subplots = 1 + if hasattr(hvsr_data, 'hvsr_windows_df') and 'HV_Curves' in hvsr_data.hvsr_windows_df.columns: + outlier_fig.data = [] + outlier_fig.update_layout(grid=None) # Clear the existing grid layout + outlier_subp = subplots.make_subplots(rows=no_subplots, cols=1, horizontal_spacing=0.01, vertical_spacing=0.1) + outlier_fig.update_layout(grid={'rows': 1}) + #outlier_fig = go.FigureWidget(outlier_subp) + outlier_fig = go.Figure(outlier_subp) + + x_data = hvsr_data['x_freqs'] + curve_traces = [] + for hv in hvsr_data.hvsr_windows_df['HV_Curves'].iterrows(): + curve_traces.append(go.Scatter(x=x_data, y=hv[1])) + outlier_fig.add_traces(curve_traces) + + # Calculate a median curve, and reshape so same size as original + medCurve = np.nanmedian(np.stack(hvsr_data.hvsr_windows_df['HV_Curves']), axis=0) + outlier_fig.add_trace(go.Scatter(x=x_data, y=medCurve, line=dict(color='rgba(0,0,0,1)', width=1.5),showlegend=False)) + + minY = np.nanmin(np.stack(hvsr_data.hvsr_windows_df['HV_Curves'])) + maxY = np.nanmax(np.stack(hvsr_data.hvsr_windows_df['HV_Curves'])) + totalWindows = hvsr_data.hvsr_windows_df.shape[0] + #medCurveArr = np.tile(medCurve, (curr_data.shape[0], 1)) + + else: + no_subplots = 3 + outlier_fig.data = [] + outlier_fig.update_layout(grid=None) # Clear the existing grid layout + outlier_subp = subplots.make_subplots(rows=no_subplots, cols=1, horizontal_spacing=0.01, vertical_spacing=0.02, + row_heights=[1, 1, 1]) + outlier_fig.update_layout(grid={'rows': 3}) + #outlier_fig = go.FigureWidget(outlier_subp) + outlier_fig = go.Figure(outlier_subp) + + if hasattr(hvsr_data, 'hvsr_windows_df'): + rowDict = {'Z':1, 'E':2, 'N':3} + showTLabelsDict={'Z':False, 'E':False, 'N':True} + def comp_rgba(comp, a): + compstr = '' + if comp=='Z': + compstr = f'rgba(0, 0, 0, {a})' + if comp=='E': + compstr = f'rgba(50, 50, 250, {a})' + if comp=='N': + compstr = f'rgba(250, 50, 50, {a})' + return compstr + compNames = ['Z', 'E', 'N'] + rmse_to_plot=[] + med_traces=[] + + noRemoved = 0 + indRemoved = [] + for i, comp in enumerate(compNames): + if hasattr(hvsr_data, 'x_freqs'): + x_data = hvsr_data['x_freqs'][comp] + else: + x_data = [1/p for p in hvsr_data['ppsds'][comp]['period_xedges'][1:]] + column = 'psd_values_'+comp + # Retrieve data from dataframe (use all windows, just in case) + curr_data = np.stack(hvsr_data['hvsr_windows_df'][column]) + + # Calculate a median curve, and reshape so same size as original + medCurve = np.nanmedian(curr_data, axis=0) + medCurveArr = np.tile(medCurve, (curr_data.shape[0], 1)) + medTrace = go.Scatter(x=x_data, 
y=medCurve, line=dict(color=comp_rgba(comp, 1), width=1.5), + name=f'{comp} Component', showlegend=True) + # Calculate RMSE + rmse = np.sqrt(((np.subtract(curr_data, medCurveArr)**2).sum(axis=1))/curr_data.shape[1]) + + rmse_threshold = np.percentile(rmse, roc_kwargs['rmse_thresh']) + + # Retrieve index of those RMSE values that lie outside the threshold + timeIndex = hvsr_data['hvsr_windows_df'].index + for j, curve in enumerate(curr_data): + if rmse[j] > rmse_threshold: + badTrace = go.Scatter(x=x_data, y=curve, + line=dict(color=comp_rgba(comp, 1), width=1.5, dash='dash'), + #marker=dict(color=comp_rgba(comp, 1), size=3), + name=str(hvsr_data.hvsr_windows_df.index[j]), showlegend=False) + outlier_fig.add_trace(badTrace, row=rowDict[comp], col=1) + if j not in indRemoved: + indRemoved.append(j) + noRemoved += 1 + else: + goodTrace = go.Scatter(x=x_data, y=curve, + line=dict(color=comp_rgba(comp, 0.01)), name=str(hvsr_data.hvsr_windows_df.index[j]), showlegend=False) + outlier_fig.add_trace(goodTrace, row=rowDict[comp], col=1) + + #timeIndRemoved = pd.DatetimeIndex([timeIndex[ind] for ind in indRemoved]) + #hvsr_data['hvsr_windows_df'].loc[timeIndRemoved, 'Use'] = False + + outlier_fig.add_trace(medTrace, row=rowDict[comp], col=1) + + outlier_fig.update_xaxes(showticklabels=False, row=1, col=1) + outlier_fig.update_yaxes(title={'text':'Z'}, row=1, col=1) + outlier_fig.update_xaxes(showticklabels=False, row=2, col=1) + outlier_fig.update_yaxes(title={'text':'E'}, row=2, col=1) + outlier_fig.update_xaxes(showticklabels=True, row=3, col=1) + outlier_fig.update_yaxes(title={'text':'N'}, row=3, col=1) + + outlier_fig.update_layout(margin={"l":10, "r":10, "t":30, 'b':0}, showlegend=True, + title=f"{hvsr_data['site']} Outliers") + if comp == 'N': + minY = np.nanmin(curr_data) + maxY = np.nanmax(curr_data) + totalWindows = curr_data.shape[0] + + outlier_fig.add_annotation( + text=f"{len(indRemoved)}/{totalWindows} outlier windows removed", + x=np.log10(max(x_data)) - (np.log10(max(x_data))-np.log10(min(x_data))) * 0.01, + y=minY+(maxY-minY)*0.01, + xanchor="right", yanchor="bottom",#bgcolor='rgba(256,256,256,0.7)', + showarrow=False,row=no_subplots, col=1) + + + outlier_fig.update_xaxes(type='log') + #with outlier_graph_widget: + # clear_output(wait=True) + # display(outlier_fig) + + hvsr_data['OutlierPlot'] = outlier_fig # not currently using + if show_plot: + outlier_fig.show() + + return outlier_fig
+ +
+ +
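The outlier test above in one piece: each window's curve is scored by its RMSE against the median curve, and windows beyond a percentile threshold are flagged. A sketch with stand-in data:

```python
import numpy as np

curves = np.random.rand(50, 128)            # windows x frequencies (stand-in)
med_curve = np.nanmedian(curves, axis=0)    # median curve across windows
rmse = np.sqrt(((curves - med_curve) ** 2).sum(axis=1) / curves.shape[1])
outliers = rmse > np.percentile(rmse, 98)   # e.g., flag the worst 2% of windows
print(outliers.sum(), 'windows flagged')
```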
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_modules/sprit/sprit_streamlit_ui.html b/docs/_modules/sprit/sprit_streamlit_ui.html new file mode 100644 index 00000000..8ac3d0b8 --- /dev/null +++ b/docs/_modules/sprit/sprit_streamlit_ui.html @@ -0,0 +1,694 @@ + + + + + + sprit.sprit_streamlit_ui — sprit 1.4 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for sprit.sprit_streamlit_ui

+import datetime
+import inspect
+import pathlib
+import sys
+import tempfile
+import zoneinfo
+
+import numpy as np
+import streamlit as st
+from obspy import UTCDateTime
+from obspy.signal.spectral_estimation import PPSD
+
+try:
+    from sprit import sprit_hvsr
+except Exception:
+    try:
+        import sprit_hvsr
+    except Exception:
+        import sprit
+
+verbose = False
+
+if verbose:
+    print('Start of file, session state length: ', len(st.session_state.keys()))
+param2print = None  # 'period_limits'
+
+
+def print_param(param2print):
+    # Assumed debug helper (its definition is not shown in this excerpt),
+    # provided so it exists before this first top-level call: print the
+    # tracked session-state parameter, if any
+    if param2print is not None and param2print in st.session_state:
+        print(param2print, '=', st.session_state[param2print])
+
+print_param(param2print)
+
+# icon = r"C:\Users\riley\LocalData\Github\SPRIT-HVSR\sprit\resources\icon\sprit_icon_alpha.ico"  # unused local-path fallback; overridden below
+icon=":material/ssid_chart:"
+aboutStr = """
+# About SpRIT
+## v1.0.2
+
+SpRIT is developed by Riley Balikian at the Illinois State Geological Survey.
+
+Please visit the following links for any questions:
+* [API Documentation](https://sprit.readthedocs.io/en/latest/)
+* [Wiki](https://github.com/RJbalikian/SPRIT-HVSR/wiki) 
+* [PyPI Repository](https://pypi.org/project/sprit/)
+
+"""
+if verbose:
+    print('Start setting up page config, session state length: ', len(st.session_state.keys()))
+st.set_page_config('SpRIT HVSR',
+                page_icon=icon,
+                layout='wide',
+                menu_items={'Get help': 'https://github.com/RJbalikian/SPRIT-HVSR/wiki',
+                                'Report a bug': "https://github.com/RJbalikian/SPRIT-HVSR/issues",
+                                'About': aboutStr})
+
+if verbose:
+    print('Start setting up constants/variables, session state length: ', len(st.session_state.keys()))
+OBSPYFORMATS =  ['AH', 'ALSEP_PSE', 'ALSEP_WTH', 'ALSEP_WTN', 'CSS', 'DMX', 'GCF', 'GSE1', 'GSE2', 'KINEMETRICS_EVT', 'KNET', 'MSEED', 'NNSA_KB_CORE', 'PDAS', 'PICKLE', 'Q', 'REFTEK130', 'RG16', 'SAC', 'SACXY', 'SEG2', 'SEGY', 'SEISAN', 'SH_ASC', 'SLIST', 'SU', 'TSPAIR', 'WAV', 'WIN', 'Y']
+bandVals=[0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,2,3,4,5,6,7,8,9,10,20,30,40,50,60,70,80,90,100]
+
+# SETUP KWARGS
+if verbose:
+    print('Start setting up kwargs dicts, session state length: ', len(st.session_state.keys()))
+
+ip_kwargs = {}
+fd_kwargs = {}
+ca_kwargs = {}
+rn_kwargs = {}
+gppsd_kwargs = {}
+phvsr_kwargs = {}
+roc_kwargs = {}
+cp_kwargs = {}
+gr_kwargs = {}
+run_kwargs = {}
+
+if verbose:
+    print('Start getting default values, session state length: ', len(st.session_state.keys()))
+    print_param(param2print)
+
+# Get default values
+sigList = [[sprit_hvsr.input_params, ip_kwargs], [sprit_hvsr.fetch_data, fd_kwargs], [sprit_hvsr.calculate_azimuth, ca_kwargs],
+            [sprit_hvsr.remove_noise, rn_kwargs], [sprit_hvsr.generate_ppsds, gppsd_kwargs], [PPSD, gppsd_kwargs],
+            [sprit_hvsr.process_hvsr, phvsr_kwargs], [sprit_hvsr.remove_outlier_curves, roc_kwargs],
+            [sprit_hvsr.check_peaks, cp_kwargs], [sprit_hvsr.get_report, gr_kwargs]]
+
+
+
+[docs] +def setup_session_state(): + if "default_params" not in st.session_state.keys(): + # "Splash screen" (only shows at initial startup) + mainContainerInitText = """ + # SpRIT HVSR + ## About + SpRIT HVSR is developed by the Illinois State Geological Survey, part of the Prairie Research Institute at the University of Illinois. + + ## Links + * API Documentation may be accessed [here (hosted by ReadtheDocs)](https://sprit.readthedocs.io/en/latest/) and [here (hosted by Github Pages)](https://rjbalikian.github.io/SPRIT-HVSR/main.html) + * The Wiki and Tutorials may be accessed [here](https://github.com/RJbalikian/SPRIT-HVSR/wiki) + * Source Code may be accessed here: [https://github.com/RJbalikian/SPRIT-HVSR](https://github.com/RJbalikian/SPRIT-HVSR) + * PyPI repository may be accessed [here](https://pypi.org/project/sprit/) + + ## MIT License + It is licensed under the MIT License: + > Permission is hereby granted, free of charge, to any person obtaining a copy + > of this software and associated documentation files (the "Software"), to deal + > in the Software without restriction, including without limitation the rights + > to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + > copies of the Software, and to permit persons to whom the Software is + > furnished to do so, subject to the following conditions: + > + > The above copyright notice and this permission notice shall be included in all + > copies or substantial portions of the Software. + > + > THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + > IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + > FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + > AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + > LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + > OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + > SOFTWARE. 
+ """ + st.markdown(mainContainerInitText, unsafe_allow_html=True) + if verbose: + print('Start sig loop, session state length: ', len(st.session_state.keys())) + print_param(param2print) + + for sig in sigList: + funSig = inspect.signature(sig[0]) + for arg in funSig.parameters.keys(): + if not (funSig.parameters[arg].default is funSig.parameters[arg].empty): + sig[1][arg] = funSig.parameters[arg].default + run_kwargs[arg] = funSig.parameters[arg].default + + gppsd_kwargs['ppsd_length'] = run_kwargs['ppsd_length'] = 30 + gppsd_kwargs['skip_on_gaps'] = run_kwargs['skip_on_gaps'] = True + gppsd_kwargs['period_step_octaves'] = run_kwargs['period_step_octaves'] = 0.03125 + gppsd_kwargs['period_limits'] = run_kwargs['period_limits'] = [1/run_kwargs['hvsr_band'][1], 1/run_kwargs['hvsr_band'][0]] + if verbose: + print('Done getting kwargs: ', len(st.session_state.keys())) + print_param(param2print) + + print('Setting up session state: ', len(st.session_state.keys())) + #st.session_state["updated_kwargs"] = {} + for key, value in run_kwargs.items(): + if verbose: + print(f'Resetting {key} to {value}') + print_param(param2print) + + # if key in st.session_state.keys() and (st.session_state[key] != value): + st.session_state[key] = value + + #listItems = ['source', 'tzone', 'elev_unit', 'data_export_format', 'detrend', 'special_handling', 'peak_selection', 'freq_smooth', 'horizontal_method', 'stalta_thresh'] + ## Convert items to lists + #for arg, value in st.session_state.items(): + # if arg in listItems: + # valList = [value] + # st.session_state[arg] = valList + # run_kwargs[arg] = st.session_state[arg] + + strItems = ['channels', 'xcoord', 'ycoord', 'elevation', 'detrend_order', 'horizontal_method'] + # Convert lists and numbers to strings + for arg, value in st.session_state.items(): + if arg in strItems: + if isinstance(value, (list, tuple)): + newVal = '[' + for item in value: + newVal = newVal+item+', ' + newVal = newVal[:-2]+']' + + st.session_state[arg] = newVal + run_kwargs[arg] = newVal + else: + st.session_state[arg] = str(value) + run_kwargs[arg] = str(value) + + if verbose: + print_param(param2print) + + dtimeItems = ['acq_date', 'starttime', 'endtime'] + # Convert everything to python datetime objects + for arg, value in st.session_state.items(): + if arg in dtimeItems: + if isinstance(value, str): + st.session_state[arg] = datetime.datetime.strptime(value, "%Y-%m-%d") + run_kwargs[arg] = datetime.datetime.strptime(value, "%Y-%m-%d") + elif isinstance(st.session_state[arg], UTCDateTime): + st.session_state[arg] = value.datetime + run_kwargs[arg] = value.datetime + else: + st.session_state[arg] = value + run_kwargs[arg] = value + + if verbose: + print_param(param2print) + + # Case matching + # Add if-statement for docs building + if len(st.session_state.keys()) > 0: #print('allkeys', list(st.session_state.keys())) + st.session_state.data_export_format = run_kwargs['data_export_format'] = st.session_state.data_export_format.upper() + st.session_state.detrend = run_kwargs['detrend'] = st.session_state.detrend.title() + st.session_state.remove_method = run_kwargs['remove_method'] = str(st.session_state.remove_method).title() + st.session_state.peak_selection = run_kwargs['peak_selection'] = st.session_state.peak_selection.title() + st.session_state.freq_smooth = run_kwargs['freq_smooth'] = st.session_state.freq_smooth.title() + st.session_state.source = run_kwargs['source'] = st.session_state.source.title() + + if verbose: + print_param(param2print) + + + # Default adjustments + 
methodDict = {'None':'Geometric Mean', '0':'Diffuse Field Assumption', '1':'Arithmetic Mean', '2':'Geometric Mean', '3':'Vector Summation', '4':'Quadratic Mean', '5':'Maximum Horizontal Value', '6':'Azimuth'} + st.session_state.horizontal_method = run_kwargs['horizontal_method'] = methodDict[st.session_state.horizontal_method] + st.session_state.plot_engine = run_kwargs['plot_engine'] = 'Plotly' + if verbose: + print_param(param2print) + + + st.session_state.default_params = run_kwargs + st.session_state.run_kws = list(run_kwargs.keys()) + + if verbose: + for key, value in st.session_state.items(): + print("session st: ", st.session_state[key], type( st.session_state[key]), '| rkwargs:', value, type(value)) + + + if verbose: + print('Done with setup, session state length: ', len(st.session_state.keys())) + print_param(param2print)
+ + +setup_session_state() + +
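To make the pattern above concrete: setup_session_state() seeds st.session_state by harvesting keyword defaults from the processing functions' signatures via inspect.signature. The following is a minimal, standalone sketch of that harvesting step, using a hypothetical function `process` rather than the actual SpRIT API:

```python
# Sketch of the signature-harvesting pattern in setup_session_state();
# `process` is a hypothetical stand-in for sprit's processing functions.
import inspect

def process(a=1, b='x', c=None):
    pass

defaults = {}
for name, param in inspect.signature(process).parameters.items():
    # keep only parameters that actually declare a default value
    if param.default is not param.empty:
        defaults[name] = param.default

print(defaults)  # {'a': 1, 'b': 'x', 'c': None}
```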
+[docs] +def check_if_default(): + if len(st.session_state.keys()) > 0: + print('Checking defaults, session state length: ', len(st.session_state.keys())) + print_param(param2print)
+ +if verbose: + check_if_default() + +
+[docs]
+def text_change(verbose=verbose):
+    # Callback that simply logs that a text input changed
+    if verbose:
+        print('Text changed')
+ + +
+[docs] +def on_file_upload(): + file = st.session_state.datapath_uploader + temp_dir = tempfile.mkdtemp() + path = pathlib.Path(temp_dir).joinpath(file.name) + with open(path, "wb") as f: + f.write(file.getvalue()) + if verbose: + print(file.name) + st.session_state.input_data = path.as_posix()
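on_file_upload persists the in-memory upload to disk so downstream readers (e.g. obspy) can work from a real filepath. The same pattern in isolation, with `payload` standing in for `file.getvalue()` and a made-up filename:

```python
# Standalone sketch of the upload-to-temp-file pattern used in on_file_upload
import pathlib
import tempfile

payload = b"example seismic bytes"        # stands in for file.getvalue()
temp_dir = tempfile.mkdtemp()             # fresh directory per upload
path = pathlib.Path(temp_dir).joinpath("upload.mseed")
with open(path, "wb") as f:
    f.write(payload)
print(path.as_posix())                    # forward-slash path, as stored in session state
```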
+ + + +
+[docs] +def on_run_data(): + mainContainer = st.container() + inputTab, outlierTab, infoTab, resultsTab = mainContainer.tabs(['Data', 'Outliers', 'Info','Results']) + plotReportTab, csvReportTab, strReportTab = resultsTab.tabs(['Plot', 'Results Table', 'Print Report']) + + if st.session_state.input_data!='': + srun = {} + for key, value in st.session_state.items(): + if key in st.session_state.run_kws and value != st.session_state.default_params[key]: + srun[key] = value + # Get plots all right + srun['plot_engine'] = 'plotly' + srun['plot_input_stream'] = True + srun['show_plot'] = False + srun['verbose'] = False #True + if verbose: + print('SPRIT RUN', srun) + st.toast('Data is processing', icon="⌛") + with mainContainer: + spinnerText = 'Data is processing with default parameters.' + excludedKeys = ['plot_engine', 'plot_input_stream', 'show_plot', 'verbose'] + NOWTIME = datetime.datetime.now() + secondaryDefaults = {'acq_date':datetime.date(NOWTIME.year, NOWTIME.month, NOWTIME.day), + 'hvsr_band':(0.4, 40), 'use_hv_curve':True, + 'starttime':datetime.time(0,0,0), + 'endtime':datetime.time(23,59,0), + 'peak_freq_range':(0.4, 40), + 'stalta_thresh':(8, 16), + 'period_limits':(0.025, 2.5), + 'remove_method':['Auto'], + 'elev_unit':'m', + 'plot_type':'HVSR p ann C+ p ann Spec p' + } + nonDefaultParams = False + for key, value in srun.items(): + if key not in excludedKeys: + if key in secondaryDefaults and secondaryDefaults[key] == value: + pass + else: + nonDefaultParams = True + spinnerText = spinnerText + f"\n-\t {key} = {value} ({type(value)} is not {st.session_state.default_params[key]}; {type(st.session_state.default_params[key])})" + if nonDefaultParams: + spinnerText = spinnerText.replace('default', 'the following non-default') + with st.spinner(spinnerText): + st.session_state.hvsr_data = sprit_hvsr.run(input_data=st.session_state.input_data, **srun) + + write_to_info_tab(infoTab) + st.balloons() + + inputTab.plotly_chart(st.session_state.hvsr_data['InputPlot'], use_container_width=True) + outlierTab.plotly_chart(st.session_state.hvsr_data['OutlierPlot'], use_container_width=True) + plotReportTab.plotly_chart(st.session_state.hvsr_data['HV_Plot'], use_container_width=True) + csvReportTab.dataframe(data=st.session_state.hvsr_data['CSV_Report']) + strReportTab.text(st.session_state.hvsr_data['Print_Report']) + + st.session_state.prev_datapath=st.session_state.input_data
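A key detail in on_run_data is that only settings the user actually changed are forwarded to sprit_hvsr.run: each session-state value is compared against the default recorded at startup. A dictionary-only sketch of that filter (the keys shown are illustrative):

```python
# Sketch of the non-default kwarg filter in on_run_data; plain dicts stand in
# for st.session_state and st.session_state.default_params.
defaults = {'hvsr_band': (0.4, 40), 'peak_selection': 'Max', 'freq_smooth': 'Konno Ohmachi'}
state    = {'hvsr_band': (0.5, 40), 'peak_selection': 'Max', 'freq_smooth': 'Konno Ohmachi'}

srun = {k: v for k, v in state.items() if v != defaults.get(k)}
print(srun)  # {'hvsr_band': (0.5, 40)} -- only the changed setting is forwarded
```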
+ + +
+[docs] +def write_to_info_tab(info_tab): + with info_tab: + st.markdown("# Processing Parameters Used") + for fun, kwargDict in sigList: + funSig = inspect.signature(fun) + #excludeKeys = ['params', 'hvsr_data', 'hvsr_results'] + funMD = "" + for arg in funSig.parameters.keys(): + if arg in st.session_state.keys(): + funMD = funMD + f"""\n * {arg} = {st.session_state[arg]}""" + + with st.expander(f"{fun.__name__}"): + st.write(funMD, unsafe_allow_html=True)
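write_to_info_tab builds each expander by intersecting a function's signature with the current session state. The same idea in isolation, using a hypothetical `example_fun` and a plain dict in place of st.session_state:

```python
# Sketch of the signature-to-markdown pattern in write_to_info_tab
import inspect

def example_fun(site='HVSR Site', hvsr_band=(0.4, 40), verbose=False):
    pass

state = {'site': 'Test Site', 'hvsr_band': (0.4, 40)}

funMD = ""
for arg in inspect.signature(example_fun).parameters.keys():
    if arg in state:                      # only report args present in state
        funMD += f"\n * {arg} = {state[arg]}"
print(funMD)
```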
+ + + +# DEFINE SIDEBAR +if verbose: + print('About to start setting up sidebar, session state length: ', len(st.session_state.keys())) + print_param(param2print) + +with st.sidebar: + if verbose: + print('Start setting up sidebar, session state length: ', len(st.session_state.keys())) + print_param(param2print) + + st.header('SpRIT HVSR', divider='rainbow') + datapathInput = st.text_input("Datapath", key='input_data', placeholder='Enter data filepath (to be read by obspy.core.Stream.read())') + # st.file_uploader('Upload data file(s)', type=OBSPYFORMATS, accept_multiple_files=True, key='datapath_uploader', on_change=on_file_upload) + with st.expander("Click to access data uploader"): + st.file_uploader("Upload data file(s)", type=OBSPYFORMATS, accept_multiple_files=False, key='datapath_uploader', on_change=on_file_upload) + + bottom_container = st.container() + + # Create top menu + with bottom_container: + + resetCol, readCol, runCol = st.columns([0.3, 0.3, 0.4]) + resetCol.button('Reset', disabled=True, use_container_width=True) + readCol.button('Read', use_container_width=True, args=((True, ))) + runCol.button('Run', type='primary', use_container_width=True, on_click=on_run_data) + + if verbose: + print('Done setting up bottom container, session state length: ', len(st.session_state.keys())) + print_param(param2print) + + # Add if-statement for docs building + if len(list(st.session_state.keys())) > 0: + st.header('Settings', divider='gray') + with st.expander('Expand to modify settings'): + if verbose: + print('Setting up sidebar expander, session state length: ', len(st.session_state.keys())) + print_param(param2print) + + ipSetTab, fdSetTab, rmnocSetTab, gpSetTab, phvsrSetTab, plotSetTab = st.tabs(['Input', 'Data', "Noise", 'PPSDs', 'H/V', 'Plot']) + #@st.experimental_dialog("Update Input Parameters", width='large') + #def open_ip_dialog(): + with ipSetTab: + if verbose: + print('Setting up input tab, session state length: ', len(st.session_state.keys())) + st.text_input("Site Name", placeholder='HVSR Site', on_change=text_change, key='site') + + #with st.expander('Primary Input Parameters', expanded=True): + + st.text_input('Instrument', help='Raspberry Shake and Tromino are currently the only values with special treatment. 
If a filepath, can use a .inst instrument file (json format)', key='instrument')
+                st.text_input('Metadata Filepath', help='Filepath to instrument response file', key='metapath')
+
+                st.select_slider('HVSR Band', value=st.session_state.hvsr_band, options=bandVals, key='hvsr_band')
+                st.select_slider('Peak Frequency Range', value=st.session_state.peak_freq_range, options=bandVals, key='peak_freq_range')
+
+                # with st.expander('Acquisition Date/Time'):
+                st.date_input('Acquisition Date', format='YYYY-MM-DD', key='acq_date')
+                st.time_input('Start time', step=60, key='starttime')
+                st.time_input('End time', step=60, key='endtime')
+
+                tZoneList = list(zoneinfo.available_timezones())
+                tZoneList.sort()
+                tZoneList.insert(0, "localtime")
+                tZoneList.insert(0, "US/Pacific")
+                tZoneList.insert(0, "US/Eastern")
+                tZoneList.insert(0, "US/Central")
+                tZoneList.insert(0, "UTC")
+                st.selectbox('Timezone', options=tZoneList, key='tzone')
+
+
+                #with st.expander('Instrument settings'):
+                st.text_input("Network", placeholder='AM', key='network')
+                st.text_input("Station", placeholder='RAC84', key='station')
+                st.text_input("Location", placeholder='00', key='loc')
+                st.text_input("Channels", placeholder='EHZ, EHE, EHN', key='channels')
+
+                #with st.expander('Location settings'):
+                st.text_input('X Coordinate', help='i.e., Longitude or Easting', key='xcoord')
+                st.text_input('Y Coordinate', help='i.e., Latitude or Northing', key='ycoord')
+                st.text_input('Z Coordinate', help='i.e., Elevation', key='elevation')
+                st.session_state.elev_unit = st.selectbox('Z Unit', options=['m', 'ft'], help='i.e., Elevation unit')
+                st.number_input('Depth', help='i.e., Depth of measurement below ground surface (not currently used)', key='depth')
+
+                st.text_input('CRS of Input Coordinates', help='Can be EPSG code or anything accepted by pyproj.CRS.from_user_input()', key='input_crs')
+                st.text_input('CRS for Export', help='Can be EPSG code or anything accepted by pyproj.CRS.from_user_input()', key='output_crs')
+                if verbose:
+                    print_param(param2print)
+
+            #@st.experimental_dialog("Update Parameters to Fetch Data", width='large')
+            #def open_fd_dialog():
+            with fdSetTab:
+                if verbose:
+                    print('Setting up fd tab, session state length: ', len(st.session_state.keys()))
+                #source: str = 'file',
+                st.selectbox('Source', options=['File', 'Raw', 'Directory', "Batch"], index=0, key='source')
+                st.text_input('Trim Directory', help='Directory for saving trimmed data', key='trim_dir')
+                st.selectbox('Data format', options=OBSPYFORMATS, index=11, key='data_export_format')
+                st.selectbox('Detrend Method', options=['None', 'Simple', 'Linear', 'Constant/Demean', 'Polynomial', 'Spline'], index=5, help='Detrend method used by the `type` parameter of obspy.trace.Trace.detrend()', key='detrend')
+                st.text_input('Detrend options', value='detrend_order=2', help="Comma-separated key=value pairs passed to the **options argument of obspy.trace.Trace.detrend()", key='detrend_order')
+                if verbose:
+                    print_param(param2print)
+
+
+            #@st.experimental_dialog("Update Parameters to Generate PPSDs", width='large')
+            #def open_ppsd_dialog():
+            with gpSetTab:
+                if verbose:
+                    print('Setting up ppsd tab, session state length: ', len(st.session_state.keys()))
+                st.toggle('Skip on gaps',
+                          help='Determines whether time segments with gaps should be skipped entirely. '
+                               'Select skip_on_gaps=True to avoid filling gaps with zeros, which might result in some data segments shorter than ppsd_length not being used in the PPSD.',
+                          key='skip_on_gaps')
+                st.number_input("Minimum Decibel Value", value=-200, step=1, key='min_deb')
+                st.number_input("Maximum Decibel Value", value=-50, step=1, key='max_deb')
+                st.number_input("Decibel bin size", value=1.0, step=0.1, key='deb_step')
+                st.session_state.db_bins = (st.session_state.min_deb, st.session_state.max_deb, st.session_state.deb_step)
+
+                st.number_input('PPSD Length (seconds)', step=1, key='ppsd_length')
+                st.number_input('PPSD Window overlap (%, 0-1)', step=0.01, min_value=0.0, max_value=1.0, key='overlap')
+                st.number_input('Period Smoothing Width (octaves)', step=0.1, key='period_smoothing_width_octaves')
+                st.number_input('Period Step (octaves)', step=0.005, format="%.5f", key='period_step_octaves')
+                periodVals = [round(1/x, 3) for x in bandVals]
+                periodVals.sort()
+
+                st.select_slider('Period Limits (s)', options=periodVals, value=st.session_state.period_limits, key='period_limits')
+                st.selectbox("Special Handling", options=['None', 'Ringlaser', 'Hydrophone'], key='special_handling')
+                if verbose:
+                    print_param(param2print)
+
+            #@st.experimental_dialog("Update Parameters to Remove Noise and Outlier Curves", width='large')
+            #def open_outliernoise_dialog():
+            with rmnocSetTab:
+                if verbose:
+                    print('Setting up noise tab, session state length: ', len(st.session_state.keys()))
+                st.number_input("Outlier Threshold", value=98, key='rmse_thresh')
+                st.radio('Threshold type', options=['Percentile', 'Value'], key='threshRadio')
+                st.session_state.use_percentile = st.session_state.threshRadio == 'Percentile'
+                st.radio('Threshold curve', options=['HV Curve', 'Component Curves'], key='curveRadio')
+                st.session_state.use_hv_curve = (st.session_state.curveRadio == 'HV Curve')
+
+                st.multiselect("Noise Removal Method",
+                               options=['None', 'Auto', 'Manual', 'Stalta', 'Saturation Threshold', 'Noise Threshold', 'Warmup', 'Cooldown', 'Buffer'], key='remove_method')
+                st.number_input('Saturation Percent', min_value=0.0, max_value=1.0, step=0.01, format="%.3f", key='sat_percent')
+                st.number_input('Noise Percent', min_value=0.0, max_value=1.0, step=0.1, format="%.2f", key='noise_percent')
+                st.number_input('Short Term Average (STA)', step=1.0, format="%.1f", key='sta')
+                st.number_input('Long Term Average (LTA)', step=1.0, format="%.1f", key='lta')
+                staltaVals = np.arange(0, 51).tolist()
+                st.select_slider('STA/LTA Thresholds', value=st.session_state.stalta_thresh, options=staltaVals, key='stalta_thresh')
+                st.number_input('Warmup Time (seconds)', step=1, key='warmup')
+                st.number_input('Cooldown Time (seconds)', step=1, key='cooldown')
+                st.number_input('Minimum Window Size (samples)', step=1, key='min_win_size')
+                st.toggle("Remove Raw Noise", help='Whether to use the raw input data to remove noise.', key='remove_raw_noise')
+                if verbose:
+                    print_param(param2print)
+
+            #@st.experimental_dialog("Update Parameters to Process HVSR", width='large')
+            #def open_processHVSR_dialog():
+            with phvsrSetTab:
+                if verbose:
+                    print('Setting up hvsr tab, session state length: ', len(st.session_state.keys()))
+                st.selectbox('Peak Selection Method', options=['Max', 'Scored'], key='peak_selection')
+                st.selectbox("Method to combine horizontal components",
+                             options=['Diffuse Field Assumption', 'Arithmetic Mean', 'Geometric Mean', 'Vector Summation', 'Quadratic Mean', 'Maximum Horizontal Value', 'Azimuth'],
+                             index=2, key='horizontal_method')
+                rList = np.arange(1001).tolist()
+                rList[0] = False
+                st.selectbox("Curve Smoothing", options=['None', 'Savgoy Filter', 'Konno Ohmachi', "Proportional", "Constant"], index=2, key='freq_smooth')
+                st.select_slider("Curve Smoothing Parameter", options=np.arange(1000).tolist(), value=40, key='f_smooth_width')
+                st.select_slider("Resample", options=rList, value=1000, key='resample')
+                st.select_slider('Outlier Curve Removal', options=rList[:100], key='outlier_curve_rmse_percentile')
+                if verbose:
+                    print_param(param2print)
+
+            def update_plot_string():
+                plotStringDict = {'Peak Frequency': ' p', 'Peak Amplitude': ' pa', 'Annotation': ' ann',
+                                  'Time windows': ' t', "Peaks of Time Windows": ' tp',
+                                  'Test 1: Peak > 2x trough below': '1',
+                                  "Test 2: Peak > 2x trough above": '2',
+                                  "Test 3: Peak > 2": '3',
+                                  "Test 4": '4', "Test 5": '5', "Test 6": '6',
+                                  }
+
+                plotString = ''
+                for plot in st.session_state.plotPlotStr:
+                    if plot == 'HVSR':
+                        plotString = plotString + 'HVSR'
+                        for pc in st.session_state.hvsrPlotStr:
+                            if 'test' in pc.lower():
+                                if 'test' not in plotString.lower():
+                                    plotString = plotString + ' Test'
+                                test_end_index = plotString.rfind("Test") + len("Test")
+                                # Find the next space after the Test token, using absolute indices
+                                nextSpaceIndex = plotString.find(" ", test_end_index)
+                                if nextSpaceIndex == -1:
+                                    nextSpaceIndex = len(plotString)
+                                noString = plotString[test_end_index:nextSpaceIndex]
+                                noString = noString + plotStringDict[pc]
+
+                                # Order test numbers correctly, keeping anything after the Test token
+                                testNos = ''.join(sorted(noString))
+                                plotString = plotString[:test_end_index] + testNos + plotString[nextSpaceIndex:]
+
+                            else:
+                                plotString = plotString + plotStringDict[pc]
+                    if plot == 'Components':
+                        plotString = plotString + ' C+'
+                        for pc in st.session_state.compPlotStr:
+                            plotString = plotString + plotStringDict[pc]
+                    if plot == 'Spectrogram':
+                        plotString = plotString + ' SPEC'
+                        for pc in st.session_state.specPlotStr:
+                            plotString = plotString + plotStringDict[pc]
+                    if plot == 'Azimuth':
+                        plotString = plotString + ' AZ'
+                st.session_state.plot_type = plotString
+
+            #@st.experimental_dialog("Update Plot Settings", width='large')
+            #def plot_settings_dialog():
+            with plotSetTab:
+                if verbose:
+                    print('Setting up plot tab, session state length: ', len(st.session_state.keys()))
+
+                st.selectbox("Plot Engine (currently only plotly supported)", options=['Matplotlib', "Plotly"], key='plot_engine', disabled=True)
+                st.text_input("Plot type (plot string)", value='HVSR p ann C+ p ann Spec p', key='plot_type')
+                st.multiselect("Charts to show", options=['HVSR', "Components", 'Spectrogram', 'Azimuth'], default=['HVSR', 'Components', "Spectrogram"],
+                               on_change=update_plot_string, key='plotPlotStr')
+
+                st.header("HVSR Chart", divider='rainbow')
+                st.multiselect('Items to plot', options=['Peak Frequency', 'Peak Amplitude', 'Annotation', 'Time windows', "Peaks of Time Windows",
+                                                         'Test 1: Peak > 2x trough below', "Test 2: Peak > 2x trough above", "Test 3: Peak > 2", "Test 4", "Test 5", "Test 6"],
+                               on_change=update_plot_string,
+                               default=["Peak Frequency", "Annotation"], key='hvsrPlotStr')
+
+                st.header("Component Chart", divider='rainbow')
+                st.multiselect('Items to plot', options=['Peak Frequency', 'Annotation', 'Time windows'], on_change=update_plot_string,
+                               default=["Peak Frequency", "Annotation"], key='compPlotStr')
+
+                st.header('Spectrogram Chart', divider='rainbow')
+                st.multiselect('Items to plot', options=['Peak Frequency', 'Annotation'], key='specPlotStr', on_change=update_plot_string)
+                if verbose:
+                    print_param(param2print)
+
+    if verbose:
+        print('Done setting up sidebar, session state length: ', len(st.session_state.keys()))
+        
print('Done setting up everything (end of main), session state length: ', len(st.session_state.keys())) + print_param(param2print) + #if __name__ == "__main__": + # main() +
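As a quick check of the test-number merging in update_plot_string (which collects SESAME test codes into a single sorted 'Test…' token while preserving anything after it), here is the same logic as a standalone function; merge_test_code is a hypothetical name used only for this sketch:

```python
# Standalone version of the Test-token merging logic in update_plot_string
def merge_test_code(plot_string, code):
    if 'test' not in plot_string.lower():
        plot_string += ' Test'
    end = plot_string.rfind('Test') + len('Test')
    nxt = plot_string.find(' ', end)          # next space after the token
    if nxt == -1:
        nxt = len(plot_string)
    digits = ''.join(sorted(plot_string[end:nxt] + code))
    return plot_string[:end] + digits + plot_string[nxt:]

print(merge_test_code('HVSR Test2 ann', '1'))  # HVSR Test12 ann
```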
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_modules/sprit/sprit_tkinter_ui.html b/docs/_modules/sprit/sprit_tkinter_ui.html new file mode 100644 index 00000000..fc579642 --- /dev/null +++ b/docs/_modules/sprit/sprit_tkinter_ui.html @@ -0,0 +1,3250 @@ + + + + + + sprit.sprit_tkinter_ui — sprit 1.4 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for sprit.sprit_tkinter_ui

+"""This script contains all the functions, classes, etc. to create a tkinter app for graphical user interface.
+"""
+import datetime
+import functools
+import linecache
+import json
+import os
+import pathlib
+import pkg_resources
+import sys
+import threading
+import time
+import tkinter as tk
+from tkinter import filedialog
+from tkinter import ttk
+from tkinter.simpledialog import askinteger
+from tkinter import messagebox
+import traceback
+import warnings
+import zoneinfo
+
+import matplotlib
+import matplotlib.pyplot as plt
+from matplotlib.backends.backend_tkagg import (FigureCanvasTkAgg, NavigationToolbar2Tk)
+from matplotlib.backend_bases import MouseButton, MouseEvent
+import numpy as np
+#matplotlib.use('TkAgg')
+
+try: #For distribution
+    from sprit import sprit_utils
+    from sprit import sprit_hvsr
+except: #For local testing
+    import sprit_hvsr 
+    import sprit_utils
+    pass
+
+global spritApp
+global current_theme_name
+global SPRIT_App
+
+resource_dir = pathlib.Path(pkg_resources.resource_filename(__name__, 'resources/'))
+settings_dir = resource_dir.joinpath('settings')
+gui_theme_file = settings_dir.joinpath('gui_theme.json')
+with open(gui_theme_file, 'r') as f:
+    curr_gui_dict = json.load(f)
+current_theme_name = curr_gui_dict['theme_name']
+
+
+[docs] +class SPRIT_App: + global spritApp + def __init__(self, master): + self.master = master + self.master.title("SPRIT") + self.params = sprit_hvsr.HVSRData({'site':''}) + + # Set the theme + self.darkthemepath = pathlib.Path(pkg_resources.resource_filename(__name__, "resources/themes/forest-dark.tcl")) + self.lightthemepath = pathlib.Path(pkg_resources.resource_filename(__name__, "resources/themes/forest-light.tcl")) + + + + # Create the style object + self.style = ttk.Style(master) + # + #self.style.theme_use('forest-light') + + self.create_menubar() + self.create_tabs() + + self.master.rowconfigure(0, weight=1) + self.master.columnconfigure(0, weight=1) + + + if 'forest' in current_theme_name: + if 'light' in current_theme_name: + self.master.tk.call('source', self.lightthemepath) + else: + self.master.tk.call('source', self.darkthemepath) + else: + self.style.theme_use(current_theme_name) + # Create the dark theme + #self.style.theme_create("dark", parent="alt", settings={ + # "TLabel": {"configure": {"background": "black", "foreground": "white"}}, + # "TButton": {"configure": {"background": "black", "foreground": "white"}}, + # # Add more options here to style other widgets + #}) + + # Create the light theme + #self.style.theme_create("light", parent="alt", settings={ + # "TLabel": {"configure": {"background": "white", "foreground": "black"}}, + # "TButton": {"configure": {"background": "white", "foreground": "black"}}, + # # Add more options here to style other widgets + #}) + + #Method to log error message +
+[docs] + def log_errorMsg(self, logMsg): + + self.log_text.insert('end', logMsg) + self.tab_control.select(self.log_tab)
+ + + #Not currently working +
+[docs] + def manual_label_update(self): + for notebook in self.master.winfo_children(): + if isinstance(notebook, ttk.Notebook): + for tab_id in notebook.tabs(): + tab_frame = notebook.nametowidget(tab_id) + + for frame in tab_frame.winfo_children(): + if isinstance(frame, ttk.LabelFrame): + for widget in frame.winfo_children(): + if isinstance(widget, ttk.Label): + # apply the updated style to the label + + self.style.layout('CustTLabel', [('Label.border', {'sticky': 'nswe', 'border': '1', 'children': [('Label.padding', {'sticky': 'nswe', 'children': [('Label.text', {'sticky': 'nswe'})]})]})]) + self.style.configure('CustTLabel', background=self.style.lookup('style', 'background'), foreground=self.style.lookup('style', 'background')) + self.style.map('CustTLabel', {'priority':[('CustTLabel',1)]}) + widget.configure(style='CustTLabel')
+ + +
+[docs]
+    def create_menubar(self):
+        self.menubar = tk.Menu(self.master)
+        self.master.config(menu=self.menubar)
+
+        self.sprit_menu = tk.Menu(self.menubar, tearoff=0)
+
+        def on_theme_select():
+            # Set the theme based on the selected value
+            self.style = ttk.Style()
+
+            #Update the theme file so the new theme opens on reboot
+            prev_theme = curr_gui_dict['theme_name']
+            curr_gui_dict['theme_name'] = self.theme_var.get()
+            with open(gui_theme_file, 'w') as f:
+                json.dump(curr_gui_dict, f)
+
+            def apply_theme():
+                if 'forest' in self.theme_var.get():
+                    if self.theme_var.get() == 'forest-dark' and 'forest-dark' not in self.style.theme_names():
+                        self.master.tk.call('source', self.darkthemepath)
+                    elif self.theme_var.get() == 'forest-light' and 'forest-light' not in self.style.theme_names():
+                        self.master.tk.call('source', self.lightthemepath)
+                self.master.tk.call("ttk::style", "theme", "use", self.theme_var.get())
+
+            if curr_gui_dict['theme_name'] == 'forest-light' or curr_gui_dict['theme_name'] == 'forest-dark':
+                do_reboot = messagebox.askyesno('App Restart Required',
+                                                f"It is recommended to restart the SpRIT GUI at this time to apply this theme. If not, you may continue but theme errors may occur. Click No to retain current theme ({prev_theme}) \nReboot now?",
+                                                )
+                print(do_reboot)
+                if do_reboot:
+                    reboot_app()
+                else:
+                    self.theme_var.set(prev_theme)
+            else:
+                apply_theme()
+
+            """An attempt to get the backgrounds right
+            def apply_to_all_children(widget, func):
+                Recursively apply a function to all child widgets of a given widget
+                children = widget.winfo_children()
+                for child in children:
+                    func(child)
+                    apply_to_all_children(child, func)
+                return
+
+            def change_background_color(widget):
+                if isinstance(widget, tk.Label):
+                    widget.option_clear()
+                    widget.configure(background=None, foreground=None)
+                return
+
+            apply_to_all_children(self.master, change_background_color)
+            """
+
+            #self.master.tk.call("ttk::setTheme", self.theme_var.get())
+
+            #self.style.theme_use(self.theme_var.get())
+            #self.master.tk.call('source', self.lightthemepath)
+            #self.style.theme_use(self.theme_var.get())
+            #self.style.configure("TLabel", background=self.style.lookup('TLabel', 'background'), foreground=self.style.lookup('TLabel', 'background'))
+
+        # Closures used directly as menu commands, so they take no arguments;
+        # the selected filepaths are placeholders and are not yet used.
+        def import_parameters():
+            filepath = filedialog.askopenfilename()
+
+        def export_parameters():
+            filepath = filedialog.asksaveasfilename()
+
+        self.theme_menu = tk.Menu(self.menubar, tearoff=0)
+        self.theme_var = tk.StringVar(value=current_theme_name)
+        self.theme_menu.add_radiobutton(label="Default", variable=self.theme_var, value="default", command=on_theme_select)
+        self.theme_menu.add_radiobutton(label="Clam", variable=self.theme_var, value="clam", command=on_theme_select)
+        self.theme_menu.add_radiobutton(label="Alt", variable=self.theme_var, value="alt", command=on_theme_select)
+        self.theme_menu.add_radiobutton(label="Forest Light (buggy)", variable=self.theme_var, value="forest-light", command=on_theme_select)
+        self.theme_menu.add_radiobutton(label="Forest Dark (buggy)", variable=self.theme_var, value="forest-dark", command=on_theme_select)
+
+        self.sprit_menu.add_cascade(label="Theme", menu=self.theme_menu)
+        self.sprit_menu.add_command(label="Import Parameters", command=import_parameters)
+        self.sprit_menu.add_command(label="Export Parameters", command=export_parameters)
+        self.sprit_menu.add_separator()
+        self.sprit_menu.add_command(label="Exit", command=self.master.quit)
+        self.settings_menu = tk.Menu(self.menubar, tearoff=0)
+        
self.instrument_menu = tk.Menu(self.settings_menu, tearoff=0) + self.instrument_var = tk.StringVar(value="Raspberry Shake") + self.instrument_menu.add_radiobutton(label="Raspberry Shake", variable=self.instrument_var, value="Raspberry Shake") + self.instrument_menu.add_radiobutton(label="Tromino", variable=self.instrument_var, value="Tromino") + self.instrument_menu.add_radiobutton(label="Other", variable=self.instrument_var, value="Other") + self.settings_menu.add_cascade(label="Instrument", menu=self.instrument_menu) + self.settings_menu.add_command(label="Processing Settings", command=lambda: self.tab_control.select(self.settings_tab)) + + self.menubar.add_cascade(label="SPRIT", menu=self.sprit_menu) + self.menubar.add_cascade(label="Settings", menu=self.settings_menu)
+ + +
+[docs] + def create_tabs(self): + self.style = ttk.Style(self.master) + + self.tab_control = ttk.Notebook(self.master) + + # INPUT TAB + self.input_tab = ttk.Frame(self.tab_control) + + # Configure the row and column of the input_tab to have a non-zero weight + hvsrFrame = ttk.LabelFrame(self.input_tab, text="Input Parameters") + #hvsrFrame.rowconfigure(0, weight=1) + hvsrFrame.columnconfigure(1, weight=1) + + # Logo and Site Name + # Replace "logo.png" with the path to your logo image + #self.logo = tk.PhotoImage(file="logo.png") + #self.logo_label = ttk.Label(hvsrFrame, image=self.logo) + #self.logo_label.grid(row=0, column=0) + self.processingData = False + + + def update_input_labels(hvsr_data): + #Update labels for data preview tab + self.input_data_label.configure(text=self.data_filepath_entry.get() + '\n' + str(hvsr_data['stream'])) + + self.obspySreamLabel_settings.configure(text=str(hvsr_data['stream'])) + + self.sensitivityLabelZ_settings.configure(text=hvsr_data['paz']['Z']['sensitivity']) + self.gainLabelZ_settings.configure(text=hvsr_data['paz']['Z']['gain']) + self.polesLabelZ_settings.configure(text=hvsr_data['paz']['Z']['poles']) + self.zerosLabelZ_settings.configure(text=hvsr_data['paz']['Z']['zeros']) + + self.sensitivityLabelN_settings.configure(text=hvsr_data['paz']['N']['sensitivity']) + self.gainLabelN_settings.configure(text=hvsr_data['paz']['N']['gain']) + self.polesLabelN_settings.configure(text=hvsr_data['paz']['N']['poles']) + self.zerosLabelN_settings.configure(text=hvsr_data['paz']['N']['zeros']) + + self.sensitivityLabelE_settings.configure(text=hvsr_data['paz']['E']['sensitivity']) + self.gainLabelE_settings.configure(text=hvsr_data['paz']['E']['gain']) + self.polesLabelE_settings.configure(text=hvsr_data['paz']['E']['poles']) + self.zerosLabelE_settings.configure(text=hvsr_data['paz']['E']['zeros']) + return + + self.data_read = False #Initialize + #FUNCTION TO READ DATA + @catch_errors + def read_data(): + update_progress_bars(prog_percent=0) + #messagebox.showinfo(title="Reading Data", message='Reading Data...') + self.log_text.insert('end', f'\n\nReading data [{datetime.datetime.now()}]\n\n') + self.starttime, self.endtime = get_times() + + + self.log_text.insert('end', f"{self.input_params_call['text']}\n\n") + self.log_text.insert('end', f"{self.fetch_data_call['text']}\n\n") + + if self.file_source.get() == 'batch': + batchType = self.batch_type.get() + if isinstance(self.fpath, str): + self.fpath = self.fpath + elif isinstance(self.fpath, tuple) and len(self.fpath)==1: + self.fpath = self.fpath[0] + elif len(self.fpath) > 1: + self.fpath = list(self.fpath) + else: + self.fpath = self.fpath[0] + + + update_progress_bars(prog_percent=1) + self.params = sprit_hvsr.input_params(input_data=self.fpath, + metapath = self.meta_path.get(), + site=self.site_name.get(), + network=self.network.get(), + station=self.station.get(), + loc=self.location.get(), + channels=[self.z_channel.get(), self.n_channel.get(), self.e_channel.get()], + acq_date = self.starttime.date(), + starttime = self.starttime, + endtime = self.endtime, + tzone = 'UTC', #Will always be converted to UTC before we get to this point when using gui + xcoord = self.x.get(), + ycoord = self.y.get(), + elevation = self.z.get(), + input_crs= self.input_crs.get(), + output_crs= self.output_crs.get(), + elev_unit= self.elev_unit.get(), + instrument = self.instrumentSel.get(), + hvsr_band = [self.hvsrBand_min.get(), self.hvsrBand_max.get()] ) + + if self.trim_dir.get()=='': + trimDir=None + else: + 
trimDir=self.trim_dir.get() + + update_progress_bars(prog_percent=2) + self.hvsr_data = sprit_hvsr.fetch_data(params=self.params, + source=self.file_source.get(), + trim_dir=trimDir, + export_format=self.export_format.get(), + detrend=self.detrend.get(), + detrend_order=self.detrend_order.get()) + + update_progress_bars(prog_percent=10) + self.site_options = self.hvsr_data.sites + + self.log_text.insert('end', f"{self.site_options}\n\n") + + firstSite = self.hvsr_data[list(self.hvsr_data.keys())[0]] + update_input_labels(firstSite) + update_site_dropdown() + + #Plot data in data preview tab + self.fig_pre, self.ax_pre = sprit_hvsr.plot_stream(stream=firstSite['stream'], params=firstSite, fig=self.fig_pre, axes=self.ax_pre, return_fig=True) + + #Plot data in noise preview tab + self.fig_noise, self.ax_noise = sprit_hvsr._plot_specgram_stream(stream=firstSite['stream'], params=firstSite, fig=self.fig_noise, ax=self.ax_noise, fill_gaps=0, component='Z', stack_type='linear', detrend='mean', dbscale=True, return_fig=True, cmap_per=[0.1,0.9]) + select_windows(event=None, initialize=True) + plot_noise_windows(self.hvsr_data) + + else: + if isinstance(self.fpath, str): + pass + elif len(self.fpath) > 1: + self.fpath = list(self.fpath) + else: + self.fpath = self.fpath[0] + + update_progress_bars(prog_percent=1) + self.params = sprit_hvsr.input_params( input_data=self.fpath, + metapath = self.meta_path.get(), + site=self.site_name.get(), + network=self.network.get(), + station=self.station.get(), + loc=self.location.get(), + channels=[self.z_channel.get(), self.n_channel.get(), self.e_channel.get()], + acq_date = self.starttime.date(), + starttime = self.starttime, + endtime = self.endtime, + tzone = 'UTC', #Will always be converted to UTC before we get to this point when using gui + xcoord = self.x.get(), + ycoord = self.y.get(), + elevation = self.z.get(), + input_crs= self.input_crs.get(), + output_crs= self.output_crs.get(), + elev_unit= self.elev_unit.get(), + instrument = self.instrumentSel.get(), + hvsr_band = [self.hvsrBand_min.get(), self.hvsrBand_max.get()] ) + self.hvsr_data = self.params + + if self.trim_dir.get()=='': + trimDir=None + else: + trimDir=self.trim_dir.get() + + update_progress_bars(prog_percent=2) + try: + self.hvsr_data = sprit_hvsr.fetch_data(params=self.params, + source=self.file_source.get(), + trim_dir=trimDir, + export_format=self.export_format.get(), + detrend=self.detrend.get(), + detrend_order=self.detrend_order.get()) + except: + traceback.print_exc() + + update_progress_bars(prog_percent=10) + update_input_labels(self.hvsr_data) + + # Plot data in data preview tab + self.fig_pre = sprit_hvsr.plot_stream(stream=self.hvsr_data['stream'], params=self.hvsr_data, fig=self.fig_pre, axes=self.ax_pre, return_fig=True) + + # Plot data in noise preview tab + self.fig_noise = sprit_hvsr._plot_specgram_stream(stream=self.hvsr_data['stream'], params=self.hvsr_data, fig=self.fig_noise, ax=self.ax_noise, fill_gaps=0, component='Z', stack_type='linear', detrend='mean', dbscale=True, return_fig=True, cmap_per=[0.1,0.9]) + select_windows(event=None, initialize=True) + plot_noise_windows(self.hvsr_data) + + self.data_read = True + if not self.processingData: + update_progress_bars(prog_percent=100) + self.tab_control.select(self.preview_data_tab) + return self.hvsr_data + + def report_results(hvsr_results, azimuth='HV'): + self.curveTest1ResultText.configure(text=hvsr_results['BestPeak'][azimuth]['Report']['Lw'][:-1]) + 
self.curveTest1Result.configure(text=hvsr_results['BestPeak'][azimuth]['Report']['Lw'][-1]) + + self.curveTest2ResultText.configure(text=hvsr_results['BestPeak'][azimuth]['Report']['Nc'][:-1]) + self.curveTest2Result.configure(text=hvsr_results['BestPeak'][azimuth]['Report']['Nc'][-1]) + + self.curveTest3ResultText.configure(text=hvsr_results['BestPeak'][azimuth]['Report']['σ_A(f)'][:-1]) + self.curveTest3Result.configure(text=hvsr_results['BestPeak'][azimuth]['Report']['σ_A(f)'][-1]) + + curvePass = (hvsr_results['BestPeak'][azimuth]['PassList']['WindowLengthFreq.'] + + hvsr_results['BestPeak'][azimuth]['PassList']['SignificantCycles']+ + hvsr_results['BestPeak'][azimuth]['PassList']['LowCurveStDevOverTime']) > 2 + if curvePass: + self.totalCurveResult.configure(text=sprit_utils.check_mark(), font=("TkDefaultFont", 16, "bold"), foreground='green') + else: + self.totalCurveResult.configure(text=sprit_utils.x_mark(), font=("TkDefaultFont", 16, "bold"), foreground='red') + + self.peakTest1ResultText.configure(text=hvsr_results['BestPeak'][azimuth]['Report']['A(f-)'][:-1]) + self.peakTest1Result.configure(text=hvsr_results['BestPeak'][azimuth]['Report']['A(f-)'][-1]) + + self.peakTest2ResultText.configure(text=hvsr_results['BestPeak'][azimuth]['Report']['A(f+)'][:-1]) + self.peakTest2Result.configure(text=hvsr_results['BestPeak'][azimuth]['Report']['A(f+)'][-1]) + + self.peakTest3ResultText.configure(text=hvsr_results['BestPeak'][azimuth]['Report']['A0'][:-1]) + self.peakTest3Result.configure(text=hvsr_results['BestPeak'][azimuth]['Report']['A0'][-1]) + + self.peakTest4ResultText.configure(text=hvsr_results['BestPeak'][azimuth]['Report']['P-'][:5] + ' and ' +hvsr_results['BestPeak'][azimuth]['Report']['P+'][:-1]) + if hvsr_results['BestPeak'][azimuth]['PassList']['FreqStability']: + self.peakTest4Result.configure(text=sprit_utils.check_mark()) + else: + self.peakTest4Result.configure(text=sprit_utils.x_mark()) + + self.peakTest5ResultText.configure(text=hvsr_results['BestPeak'][azimuth]['Report']['Sf'][:-1]) + self.peakTest5Result.configure(text=hvsr_results['BestPeak'][azimuth]['Report']['Sf'][-1]) + + self.peakTest6ResultText.configure(text=hvsr_results['BestPeak'][azimuth]['Report']['Sa'][:-1]) + self.peakTest6Result.configure(text=hvsr_results['BestPeak'][azimuth]['Report']['Sa'][-1]) + + peakPass = (hvsr_results['BestPeak'][azimuth]['PassList']['PeakProminenceBelow'] + + hvsr_results['BestPeak'][azimuth]['PassList']['PeakProminenceAbove']+ + hvsr_results['BestPeak'][azimuth]['PassList']['PeakAmpClarity']+ + hvsr_results['BestPeak'][azimuth]['PassList']['FreqStability']+ + hvsr_results['BestPeak'][azimuth]['PassList']['PeakStability_FreqStD']+ + hvsr_results['BestPeak'][azimuth]['PassList']['PeakStability_AmpStD']) >= 5 + if peakPass: + self.totalPeakResult.configure(text=sprit_utils.check_mark(), font=("TkDefaultFont", 16, "bold"), foreground='green') + else: + self.totalPeakResult.configure(text=sprit_utils.x_mark(), font=("TkDefaultFont", 16, "bold"), foreground='red') + + if curvePass and peakPass: + self.totalResult.configure(text=f'Pass {sprit_utils.check_mark()}', font=("TkDefaultFont", 22, "bold"), foreground='green') + else: + self.totalResult.configure(text=f'Fail {sprit_utils.x_mark()}', font=("TkDefaultFont", 22, "bold"), foreground='red') + + sprit_hvsr.plot_hvsr(hvsr_results, plot_type=get_kindstr(), fig=self.fig_results, ax=self.ax_results, use_subplots=True, clear_fig=False) + + #FUNCTION TO PROCESS DATA + @catch_errors + def process_data(): + 
update_progress_bars(prog_percent=0) + #messagebox.showinfo("Processing Data", 'Processing Data...') + self.processingData = True #Set to true while data processing algorithm is being run + + if not self.data_read: + self.hvsr_data = read_data() + update_progress_bars(prog_percent=12) + + self.log_text.insert('end', f"\n\nProcessing Data [{datetime.datetime.now()}]\n\n") + self.log_text.insert('end', f"{self.generate_ppsd_call['text']}\n\n") + + + self.hvsr_data = sprit_hvsr.remove_noise(hvsr_data=self.hvsr_data, + remove_method='auto', + sat_percent=0.995, + noise_percent=0.8, + sta=2, + lta=30, + stalta_thresh=[0.5, 5], + warmup_time=0, + cooldown_time=0, + min_win_size=1, + remove_raw_noise=False) + update_progress_bars(prog_percent=12) + self.hvsr_data = plot_noise_windows(self.hvsr_data) + + update_progress_bars(prog_percent=15) + self.hvsr_data = sprit_hvsr.generate_ppsds(hvsr_data=self.hvsr_data, + remove_outliers=self.remove_outliers.get(), + outlier_std=self.outlier_std.get(), + ppsd_length=self.ppsd_length.get(), + overlap=self.overlap.get(), + period_step_octaves=self.perStepOct.get(), + skip_on_gaps=self.skip_on_gaps.get(), + db_bins=self.db_bins, + period_limits=self.period_limits, + period_smoothing_width_octaves=self.perSmoothWidthOct.get(), + special_handling=special_handling#, verbose=True + ) + update_progress_bars(prog_percent=50) + + self.hvsr_data = sprit_hvsr.remove_outlier_curves(hvsr_data=self.hvsr_data, + rmse_thresh=98, + use_percentile=True, + use_hv_curve = False, + show_plot = False) + update_progress_bars(prog_percent=60) + + self.log_text.insert('end', f"{self.procHVSR_call['text']}\n\n") + self.hvsr_results = sprit_hvsr.process_hvsr(hvsr_data=self.hvsr_data, + method=self.method_ind, + smooth=self.hvsmooth_param, + freq_smooth=self.freq_smooth.get(), + f_smooth_width=self.fSmoothWidth.get(), + resample=self.hvresample_int, + outlier_curve_rmse_percentile=True) + update_progress_bars(prog_percent=90) + + + self.log_text.insert('end', f"{self.checkPeaks_Call['text']}\n\n") + self.hvsr_results = sprit_hvsr.check_peaks(hvsr_data=self.hvsr_results, + hvsr_band = [self.hvsrBand_min.get(), self.hvsrBand_max.get()], + peak_freq_range=[self.peakFreqRange_min.get(), self.peakFreqRange_max.get()]) + update_progress_bars(prog_percent=95) + + + self.log_text.insert('end', f"{self.checkPeaks_Call['text']}\n\n") + if isinstance(self.hvsr_results, sprit_hvsr.HVSRData): + report_results(self.hvsr_results) + self.results_siteSelectFrame.grid_forget() + elif isinstance(self.hvsr_results, sprit_hvsr.HVSRBatch): + self.results_siteSelectFrame.grid(row=0, column=0, columnspan=10, sticky='ew') + report_results(self.hvsr_results[self.hvsr_results.sites[0]]) + else: + warnings.warn(f'Data is of type {type(self.hvsr_results)}; should be HVSRData or HVSRBatch type.') + + #Log results + self.log_text.insert('end', f"Processing completed at [{datetime.datetime.now()}]\n\n") + self.hvsr_results = sprit_hvsr.get_report(self.hvsr_results, report_format='print', no_output=True) + if isinstance(self.hvsr_results, sprit_hvsr.HVSRData): + #format data to be same as HVSRBatch + hvsrResults = {'sitename_placeholder':self.hvsr_results} + else: + hvsrResults = self.hvsr_results + + for sitename in hvsrResults.keys(): + self.log_text.insert('end', f"{hvsrResults[sitename]['Print_Report']}\n\n") + + self.processingData = False + self.tab_control.select(self.results_tab) + update_progress_bars(prog_percent=100) + + global update_progress_bars + def update_progress_bars(prog_percent, 
process_name='Processing'):
+            progBarListList = [[self.inputProgBar, (0, 0), True],
+                               [self.prevProgBar, (0, 0), True],
+                               [self.noiseProgBar, (0, 0), True],
+                               [self.settingsProgBar_ppsd, (0, 0), True],
+                               [self.settingsProgBar_hvsr, (0, 0), True],
+                               [self.settingsProgBar_plot, (0, 0), True],
+                               [self.logProgBar, (0, 11), False],
+                               [self.resultsProgBar, (0, 26), False]]
+
+            def prog_bar_update(progBarListList, progPercent, processName):
+                for bar in progBarListList:
+                    progBar = bar[0]
+                    barLoc = bar[1]
+
+                    progBar['value'] = progPercent
+
+                    if progPercent == 0:
+                        progBar.master.columnconfigure(0, weight=1)
+                        progBar.grid(row=barLoc[0], column=barLoc[1], sticky='ew')
+                    elif progPercent == 100:
+                        progBar.grid_forget()
+
+                    progBar.update()
+
+            # Update the progress bars directly on the main thread;
+            # tkinter widgets are not thread-safe
+            prog_bar_update(progBarListList=progBarListList, progPercent=prog_percent, processName=process_name)
+            #self.update_idletasks()
+
+        def update_input_params_call():
+            prevCall = self.input_params_call.cget('text')
+            self.input_params_call.configure(text="input_params( input_data='{}', metapath={}, site='{}', instrument='{}',\n\tnetwork='{}', station='{}', loc='{}', channels=[{}, {}, {}], \n\tacq_date='{}', starttime='{}', endtime='{}', tzone='{}', \n\txcoord={}, ycoord={}, elevation={}, input_crs='{}', output_crs='{}', elev_unit='{}', \n\thvsr_band=[{}, {}], peak_freq_range=[{}, {}])".format(
+                self.data_path.get(), self.meta_path.get(), self.site_name.get(), self.instrumentSel.get(),
+                self.network.get(), self.station.get(), self.location.get(),
+                self.z_channel.get(), self.e_channel.get(), self.n_channel.get(),
+                self.acq_date, self.starttime.time(), self.endtime.time(), self.tz,
+                self.x.get(), self.y.get(), self.z.get(),
+                self.input_crs.get(), self.output_crs.get(), self.elev_unit.get(),
+                self.hvsrBand_min.get(), self.hvsrBand_max.get(),
+                self.peakFreqRange_min.get(), self.peakFreqRange_max.get()))
+
+            newCall = self.input_params_call.cget('text')
+            if prevCall == newCall:
+                self.data_read = True
+            else:
+                self.data_read = False
+
+        #Specify site name
+        siteLabel = ttk.Label(hvsrFrame, text="Site Name")
+        siteLabel.grid(row=0, column=0, sticky='e', padx=5)
+        self.site_name = tk.StringVar()
+        self.site_name.set('HVSR Site')
+        self.site_name_entry = ttk.Entry(hvsrFrame, textvariable=self.site_name, validate='focusout', validatecommand=update_input_params_call)
+        self.site_name_entry.grid(row=0, column=1, columnspan=1, sticky='ew', padx=5)
+
+        def on_source_select():
+            self.data_read = False
+            try:
+                str(self.file_source.get())
+                sourceLabel.configure(text="source='{}'".format(self.file_source.get()))
+                update_fetch_call()
+
+                if self.file_source.get() == 'raw' or self.file_source.get() == 'dir':
+                    self.browse_data_filepath_button.configure(text='Browse Folder')
+                    self.batch_options_frame.grid_forget()
+                elif self.file_source.get() == 'batch':
+                    self.batch_options_frame.grid(row=11, column=0, columnspan=7, sticky='ew')
+                    self.browse_data_filepath_button.configure(text='Browse File(s)')
+                else:
+                    self.browse_data_filepath_button.configure(text='Browse File(s)')
+                    self.batch_options_frame.grid_forget()
+                return True
+            except ValueError:
+                return False
+
+        sourceLabel = ttk.Label(master=hvsrFrame, text="source='file'")
+
+        ttk.Label(master=hvsrFrame, text='Data Source Type [str]').grid(row=0, column=3, sticky='e', padx=5)
+        sourcFrame = ttk.Frame(hvsrFrame)
+        sourcFrame.grid(row=0, column=4, sticky='w', columnspan=3)
+        self.file_source = tk.StringVar()
+        self.file_source.set('file')
+        ttk.Radiobutton(master=sourcFrame, text='File', 
variable=self.file_source, value='file', command=on_source_select).grid(row=0, column=0, sticky='w', padx=(5, 10)) + ttk.Radiobutton(master=sourcFrame, text='Raw', variable=self.file_source, value='raw', command=on_source_select).grid(row=0, column=1, sticky='w', padx=(5, 10)) + ttk.Radiobutton(master=sourcFrame, text='Batch', variable=self.file_source, value='batch', command=on_source_select).grid(row=0, column=2, sticky='w', padx=(5, 10)) + ttk.Radiobutton(master=sourcFrame, text='Directory', variable=self.file_source, value='dir', command=on_source_select).grid(row=0, column=3, sticky='w', padx=(5, 10)) + + #Instrument select + ttk.Label(hvsrFrame, text="Instrument").grid(row=0, column=6, sticky='e', padx=5) + inst_options = ["Raspberry Shake", "Tromino", "Other"] + + + def on_option_select(self, inst): + update_input_params_call() + if inst == "Raspberry Shake": + self.network_entry.configure(state='normal') + self.station_entry.configure(state='normal') + self.location_entry.configure(state='normal') + + self.z_channel_entry.delete(0, 'end') + self.e_channel_entry.delete(0, 'end') + self.n_channel_entry.delete(0, 'end') + + self.z_channel_entry.insert(0,"EHZ") + self.e_channel_entry.insert(0,"EHE") + self.n_channel_entry.insert(0,"EHN") + + self.network_entry.delete(0, 'end') + self.network_entry.insert(0,"AM") + + self.station_entry.delete(0, 'end') + self.station_entry.insert(0,"RAC84") + + self.location_entry.delete(0, 'end') + self.location_entry.insert(0,"00") + else: + self.network_entry.configure(state='disabled') + self.station_entry.configure(state='disabled') + self.location_entry.configure(state='disabled') + + self.instrumentSel = tk.StringVar(value=inst_options[0]) + self.instrument_dropdown = ttk.OptionMenu(hvsrFrame, self.instrumentSel, inst_options[0], *inst_options, command=on_option_select) + self.instrument_dropdown.config(width=20) + self.instrument_dropdown.grid(row=0, column=7, columnspan=1, sticky='ew') + + # Data Filepath + dataLabel= ttk.Label(hvsrFrame, text="Data Filepath") + dataLabel.grid(row=1, column=0, sticky='e', padx=5, pady=(5,2.55)) + + #Function to set self.data_read False whenever the data_path is updated + + def on_data_path_change(data_path, index, trace_mode): + #If our data path changes, data is registered as not having been read + #This is primarily so that if just the Run button is pushed, it will know to first read the data + self.data_read = False + + def filepath_update(): + self.data_read = False + self.fpath = self.data_path.get() + self.data_read = False + update_input_params_call() + + self.data_path = tk.StringVar() + self.data_path.set('sample') + self.fpath = self.data_path.get() + self.data_path.trace_add('write', on_data_path_change) + self.data_filepath_entry = ttk.Entry(hvsrFrame, textvariable=self.data_path, validate='focusout', validatecommand=filepath_update) + self.data_filepath_entry.grid(row=1, column=1, columnspan=6, sticky='ew', padx=5, pady=(5,2.55)) + + + def browse_data_filepath(): + if self.file_source.get() == 'raw' or self.file_source.get() == 'dir': + self.fpath = filedialog.askdirectory() + if self.fpath: + self.data_filepath_entry.delete(0, 'end') + self.data_filepath_entry.insert(0, self.fpath) + else: + self.fpath = filedialog.askopenfilenames() + + #fpath will always be tuple + self.no_data_files = len(self.fpath) + + if self.fpath: + self.data_filepath_entry.delete(0, 'end') + for f in self.fpath: + self.data_filepath_entry.insert('end', self.fpath) + + update_input_params_call() + + buttonFrame = 
ttk.Frame(hvsrFrame) + buttonFrame.grid(row=1, column=7, sticky='ew') + + self.browse_data_filepath_button = ttk.Button(buttonFrame, text="Browse File(s)", command=browse_data_filepath) + + #self.browse_data_filepath_button.grid(row=1, column=6, sticky='ew') + self.browse_data_filepath_button.pack(side="left", fill="x", expand=True, padx=(0,2), pady=(5,2.55)) + + # Metadata Filepath + ttk.Label(hvsrFrame, text="Metadata Filepath").grid(row=2, column=0, sticky='e', padx=5, pady=(2.5,5)) + self.meta_path = tk.StringVar() + self.meta_path.set('') + self.metadata_filepath_entry = ttk.Entry(hvsrFrame, textvariable=self.meta_path, validate='focusout', validatecommand=update_input_params_call) + self.metadata_filepath_entry.grid(row=2, column=1, columnspan=6, sticky='ew', padx=5, pady=(2.5,5)) + + + def browse_metadata_filepath(): + self.data_read = False #New file will not have been read, set to False + filepath = filedialog.askopenfilename() + if filepath: + self.metadata_filepath_entry.delete(0, 'end') + self.metadata_filepath_entry.insert(0, filepath) + update_input_params_call() + + self.browse_metadata_filepath_button = ttk.Button(hvsrFrame, text="Browse", command=browse_metadata_filepath) + self.browse_metadata_filepath_button.grid(row=2, column=7, sticky='ew', padx=0, pady=(2.5,5)) + + + def update_acq_date(): + aMonth = self.acq_month.get() + if str(aMonth)[0]=='0': + aMonth = str(aMonth)[-1] + + aDay = self.acq_day.get() + if str(aDay)[0]=='0': + aDay = str(aDay)[-1] + + self.acq_date = datetime.date(year=self.acq_year.get(), month=aMonth, day=aDay) + self.day_of_year = self.acq_date.timetuple().tm_yday + self.doy_label.configure(text=str(self.day_of_year)) + update_input_params_call() + + # Date and Time + dateFrame = ttk.Frame(hvsrFrame) + dateFrame.grid(row=3, column=1, columnspan=2, sticky='e', padx=5) + ttk.Label(dateFrame, text="Date").grid(row=1, column=1, sticky='e', padx=5) + + self.acq_year = tk.IntVar() + self.acq_year.set(int(datetime.datetime.today().year)) + self.acq_year_entry = ttk.Spinbox(dateFrame, from_=0, to=10000, width=7, textvariable=self.acq_year, validate='focusout', validatecommand=update_acq_date) + self.acq_year_entry.grid(row=1, column=2, sticky='ew', padx=1) + + self.acq_month = tk.IntVar() + self.acq_month.set(int(datetime.datetime.today().month)) + self.acq_month_entry = ttk.Spinbox(dateFrame, from_=0, to=12, width=3, textvariable=self.acq_month, validate='focusout', validatecommand=update_acq_date) + self.acq_month_entry.grid(row=1, column=3, sticky='ew', padx=1) + + self.acq_day = tk.IntVar() + self.acq_day.set(int(datetime.datetime.today().day)) + self.acq_day_entry = ttk.Spinbox(dateFrame, from_=0, to=31, width=3, textvariable=self.acq_day, validate='focusout', validatecommand=update_acq_date) + self.acq_day_entry.grid(row=1, column=4, sticky='ew', padx=1) + + self.acq_date = datetime.date.today() + + sTimeFrame = ttk.Frame(hvsrFrame) + sTimeFrame.grid(row=3, column=4, sticky='ew') + + def get_times(): + #Format starttime as datetime object (in timezone as originally entered) + self.acq_date = datetime.date(year=self.acq_year.get(), month=self.acq_month.get(), day=self.acq_day.get()) + + sHour = self.start_hour.get() + if str(sHour)[0] == '0': + sHour = int(str(sHour)[-1]) + + sMin = self.start_minute.get() + if str(sMin)[0] == '0': + sMin = int(str(sMin)[-1]) + + self.starttime = datetime.datetime(year = int(self.acq_date.year), + month = int(self.acq_date.month), + day = int(self.acq_date.day), + hour = int(sHour), + minute = int(sMin), + 
tzinfo=self.tz) + + #Get duration, as originally entered + hour_dur = self.end_hour.get() - self.start_hour.get() + if hour_dur < 0: + hour_dur = self.end_hour.get() + 24 - self.start_hour.get() + min_dur = self.end_minute.get() - self.start_minute.get() + + #Convert starttime to utc + #self.starttime = self.tz.normalize(self.tz.localize(self.starttime)).astimezone(pytz.utc) + self.starttime = self.starttime.astimezone(datetime.timezone.utc) + + #Get endttime based on utc starttime and original duration + self.endtime = self.starttime + datetime.timedelta(hours=hour_dur, minutes=min_dur) + + return self.starttime, self.endtime + + self.tz = datetime.timezone.utc + + + def any_time_change(): + self.data_read = False #New file will not have been read, set to False + self.acq_date = datetime.date(year=self.acq_year.get(), month=self.acq_month.get(), day=self.acq_day.get()) + self.starttime, self.endtime = get_times() + update_input_params_call() + + ttk.Label(hvsrFrame, text="Start Time").grid(row=3, column=3, sticky='e', padx=5) + colonLabel= ttk.Label(sTimeFrame, text=":")#.grid(row=3, column=4, padx=(20,0), sticky='w') + self.start_hour = tk.IntVar() + self.start_hour.set(00) + self.start_time_hour_entry = ttk.Spinbox(sTimeFrame, from_=0, to=23, width=5, textvariable=self.start_hour, validate='focusout', validatecommand=any_time_change) + self.start_time_hour_entry#.grid(row=3, column=4, sticky='w') + self.start_minute = tk.DoubleVar() + self.start_minute.set(00) + self.start_time_min_entry = ttk.Spinbox(sTimeFrame, from_=0, to=59, width=5, textvariable=self.start_minute, validate='focusout', validatecommand=any_time_change) + self.start_time_min_entry#.grid(row=3, column=4, padx=80, sticky='w') + + #sTLabel.pack(side="left", fill="x", expand=True) + self.start_time_hour_entry.pack(side='left', expand=True) + colonLabel.pack(side="left", fill="x") + self.start_time_min_entry.pack(side='right', expand=True) + + eTimeFrame = ttk.Frame(hvsrFrame) + eTimeFrame.grid(row=3, column=6, sticky='ew') + ttk.Label(hvsrFrame, text="End Time").grid(row=3, column=5, sticky='e', padx=5) + colonLabel = ttk.Label(eTimeFrame, text=":")#.grid(row=3, column=6, padx=(20,0), sticky='w') + self.end_hour = tk.IntVar() + self.end_hour.set(23) + self.end_time_hour_entry = ttk.Spinbox(eTimeFrame, from_=0, to=23, width=5, textvariable=self.end_hour, validate='focusout', validatecommand=any_time_change) + self.end_time_hour_entry#.grid(row=3, column=+, sticky='w') + self.end_minute = tk.DoubleVar() + self.end_minute.set(59) + self.end_time_min_entry = ttk.Spinbox(eTimeFrame, from_=0, to=59, width=5, textvariable=self.end_minute, validate='focusout', validatecommand=any_time_change) + self.end_time_min_entry#.grid(row=3, column=+, padx=80, sticky='w') + + #eTLabel.pack(side="left", fill="x", expand=True) + self.end_time_hour_entry.pack(side='left', expand=True) + colonLabel.pack(side="left", fill="x") + self.end_time_min_entry.pack(side='right', expand=True) + + self.acq_date = datetime.date(year=self.acq_year.get(), month=self.acq_month.get(), day=self.acq_day.get()) + self.starttime, self.endtime = get_times() + + + def onTimezoneSelect(event): + #Listbox "loses" selection and triggers an event sometimes, so need to check if that is just what happened + if self.timezone_listbox.curselection(): + #If it was an actual selection, update timezone + self.tz = zoneinfo.ZoneInfo(self.timezone_listbox.get(self.timezone_listbox.curselection())) + else: + #If it was just the listbox losing the selection, don't change anything + 
pass + update_input_params_call() + + self.timezone_listbox = tk.Listbox(hvsrFrame, selectmode='browse', height=25) + + self.timezone_listbox.insert('end', 'UTC') + self.timezone_listbox.insert('end', 'US/Central') + + for tz in zoneinfo.available_timezones():# pytz.all_timezones: + if tz !='UTC': + self.timezone_listbox.insert('end', tz) + self.timezone_listbox.selection_set(0) + self.timezone_listbox.bind('<<ListboxSelect>>', onTimezoneSelect) + + ttk.Label(hvsrFrame,text="Timezone").grid(row=3,column=7, sticky='w', padx=5) + self.timezone_listbox.grid(row=4,column=7, rowspan=26, sticky='nsew', padx=5) + + # DOY + self.day_of_year = self.acq_date.timetuple().tm_yday + + ttk.Label(hvsrFrame,text="Day of Year:").grid(row=4, column=1, sticky='e', padx=5, pady=10) + self.doy_label = ttk.Label(hvsrFrame, text=str(self.day_of_year)) + self.doy_label.grid(row=4, column=2, sticky='w') + + # UTC Time Output + ttk.Label(hvsrFrame,text="UTC Time:").grid(row=4, column=3, sticky='e', padx=5, pady=10) + self.utc_time_output_label = ttk.Label(hvsrFrame, text="") + self.utc_time_output_label.grid(row=4, column=4) + + # Initialize as UTC + self.tz = datetime.timezone.utc + + + self.starttime, self.endtime = get_times() + + # X Y Z CRS Depth + ttk.Label(hvsrFrame,text="X").grid(row=5,column=1, sticky='e', padx=5, pady=10) + self.x = tk.DoubleVar() + self.x.set(0) + self.x_entry = ttk.Entry(hvsrFrame, textvariable=self.x, validate='focusout', validatecommand=update_input_params_call) + self.x_entry.grid(row=5,column=2, sticky='w', padx=0) + + ttk.Label(hvsrFrame,text="Y").grid(row=5,column=3, sticky='e', padx=5, pady=10) + self.y = tk.DoubleVar() + self.y.set(0) + self.y_entry = ttk.Entry(hvsrFrame, textvariable=self.y, validate='focusout', validatecommand=update_input_params_call) + self.y_entry.grid(row=5, column=4, sticky='w', padx=0) + + ttk.Label(hvsrFrame,text="Z").grid(row=5,column=5, sticky='e', padx=5, pady=10) + self.z = tk.DoubleVar() + self.z.set(0) + self.z_entry = ttk.Entry(hvsrFrame, textvariable=self.z, validate='focusout', validatecommand=update_input_params_call) + self.z_entry.grid(row=5,column=6, sticky='w', padx=0) + + ttk.Label(hvsrFrame,text="Input CRS").grid(row=6,column=1, sticky='e', padx=5, pady=10) + self.input_crs = tk.StringVar() + self.input_crs.set('EPSG:4326') + self.input_crs_entry = ttk.Entry(hvsrFrame, textvariable=self.input_crs, validate='focusout', validatecommand=update_input_params_call) + self.input_crs_entry.grid(row=6,column=2, sticky='w', padx=0) + + ttk.Label(hvsrFrame,text="Output CRS").grid(row=6,column=3, sticky='e', padx=5, pady=10) + self.output_crs = tk.StringVar() + self.output_crs.set('EPSG:4326') + self.output_crs_entry = ttk.Entry(hvsrFrame, textvariable=self.output_crs, validate='focusout', validatecommand=update_input_params_call) + self.output_crs_entry.grid(row=6, column=4, sticky='w', padx=0) + + ttk.Label(master=hvsrFrame, text='Elevation Unit').grid(row=6, column=5, sticky='e', padx=5, pady=10) + elevUnitFrame= ttk.Frame(hvsrFrame) + elevUnitFrame.grid(row=6, column=6, sticky='w', columnspan=3) + self.elev_unit = tk.StringVar() + self.elev_unit.set('meters') + ttk.Radiobutton(master=elevUnitFrame, text='Meters', variable=self.elev_unit, value='meters', command=update_input_params_call).grid(row=0, column=0, sticky='w', padx=(5, 10)) + ttk.Radiobutton(master=elevUnitFrame, text='Feet', variable=self.elev_unit, value='feet', command=update_input_params_call).grid(row=0, column=1, sticky='w', padx=(5, 10)) + + # Network Station Location + 
ttk.Label(hvsrFrame,text="Network").grid(row=7,column=1, sticky='e', padx=5, pady=10) + self.network = tk.StringVar() + self.network.set('AM') + self.network_entry = ttk.Entry(hvsrFrame, textvariable=self.network, validate='focusout', validatecommand=update_input_params_call) + self.network_entry.grid(row=7,column=2, sticky='w', padx=0) + + ttk.Label(hvsrFrame,text="Station").grid(row=7,column=3, sticky='e', padx=5, pady=10) + self.station = tk.StringVar() + self.station.set('RAC84') + self.station_entry = ttk.Entry(hvsrFrame, textvariable=self.station, validate='focusout', validatecommand=update_input_params_call) + self.station_entry.grid(row=7,column=4, sticky='w', padx=0) + + ttk.Label(hvsrFrame,text="Location").grid(row=7,column=5, sticky='e', padx=5, pady=10) + self.location = tk.StringVar() + self.location.set('00') + self.location_entry = ttk.Entry(hvsrFrame, textvariable=self.location, validate='focusout', validatecommand=update_input_params_call) + self.location_entry.grid(row=7,column=6, sticky='w', padx=0) + + # Z N E Channels + ttk.Label(hvsrFrame,text="Z Channel").grid(row=8,column=1, sticky='e', padx=5, pady=10) + self.z_channel = tk.StringVar() + self.z_channel.set('EHZ') + self.z_channel_entry = ttk.Entry(hvsrFrame, textvariable=self.z_channel, validate='focusout', validatecommand=update_input_params_call) + self.z_channel_entry.grid(row=8,column=2, sticky='w', padx=0) + + ttk.Label(hvsrFrame,text="N Channel").grid(row=8,column=3, sticky='e', padx=5, pady=10) + self.n_channel = tk.StringVar() + self.n_channel.set('EHN') + self.n_channel_entry = ttk.Entry(hvsrFrame, textvariable=self.n_channel, validate='focusout', validatecommand=update_input_params_call) + self.n_channel_entry.grid(row=8,column=4, sticky='w', padx=0) + + ttk.Label(hvsrFrame,text="E Channel").grid(row=8,column=5, sticky='e', padx=5, pady=10) + self.e_channel = tk.StringVar() + self.e_channel.set('EHE') + self.e_channel_entry = ttk.Entry(hvsrFrame, textvariable=self.e_channel, validate='focusout', validatecommand=update_input_params_call) + self.e_channel_entry.grid(row=8,column=6, sticky='w', padx=0) + + # HVSR Band + def on_hvsrband_update(): + try: + float(self.hvsrBand_min.get()) + float(self.hvsrBand_max.get()) + + hvsrBandLabel.configure(text='hvsr_band=[{}, {}]'.format(self.hvsrBand_min.get(), self.hvsrBand_max.get())) + update_check_peaks_call(self.checkPeaks_Call) + update_input_params_call() + return True + except ValueError: + return False + + ttk.Label(hvsrFrame,text="HVSR Band").grid(row=9,column=1, sticky='e', padx=10, pady=10) + hvsrbandframe= ttk.Frame(hvsrFrame) + hvsrbandframe.grid(row=9, column=2,sticky='w') + self.hvsrBand_min = tk.DoubleVar() + self.hvsrBand_min.set(0.4) + hvsr_band_min_entry = ttk.Entry(hvsrbandframe, width=9, textvariable=self.hvsrBand_min, validate='focusout', validatecommand=on_hvsrband_update) + hvsr_band_min_entry.grid(row=0, column=0, sticky='ew', padx=(0,2)) + + self.hvsrBand_max = tk.DoubleVar() + self.hvsrBand_max.set(40) + hvsr_band_max_entry = ttk.Entry(hvsrbandframe, width=9,textvariable=self.hvsrBand_max, validate='focusout', validatecommand=on_hvsrband_update) + hvsr_band_max_entry.grid(row=0,column=1, sticky='ew', padx=(2,0)) + + # Peak Freq Range Band + def on_peakFreqRange_update(): + try: + float(self.peakFreqRange_min.get()) + float(self.peakFreqRange_max.get()) + + peakFreqRangeLabel.configure(text='peak_freq_range=[{}, {}]'.format(self.peakFreqRange_min.get(), self.peakFreqRange_max.get())) + update_check_peaks_call(self.checkPeaks_Call) + 
update_input_params_call() + return True + except ValueError: + return False + + ttk.Label(hvsrFrame,text="Peak Freq. Range").grid(row=9,column=3, sticky='e', padx=10, pady=10) + peakFreqRangeframe= ttk.Frame(hvsrFrame) + peakFreqRangeframe.grid(row=9, column=4,sticky='w') + self.peakFreqRange_min = tk.DoubleVar() + self.peakFreqRange_min.set(0.4) + peakFreqRange_min_entry = ttk.Entry(peakFreqRangeframe, width=9, textvariable=self.peakFreqRange_min, validate='focusout', validatecommand=on_peakFreqRange_update) + peakFreqRange_min_entry.grid(row=0, column=0, sticky='ew', padx=(0,2)) + + self.peakFreqRange_max = tk.DoubleVar() + self.peakFreqRange_max.set(40) + peakFreqRange_max_entry = ttk.Entry(peakFreqRangeframe, width=9,textvariable=self.peakFreqRange_max, validate='focusout', validatecommand=on_peakFreqRange_update) + peakFreqRange_max_entry.grid(row=0,column=1, sticky='ew', padx=(2,0)) + + #BATCH Section + def update_batch_data_read_call(): + self.batch_read_data_call.configure(text="batch_data_read(input_data, batch_type='{}', param_col={}, batch_params={})".format( + self.batch_type.get(), self.param_col.get(), self.batch_params.get())) + return + + def on_batch_type_select(): + update_batch_data_read_call() + return + + self.batch_options_frame = ttk.LabelFrame(hvsrFrame, text='Batch Options') + ttk.Label(self.batch_options_frame, text="Batch Type").grid(row=0,column=0, sticky='e', padx=10, pady=10) + batchTypeFrame= ttk.Frame(self.batch_options_frame) + batchTypeFrame.grid(row=0, column=1, sticky='w', columnspan=3) + self.batch_type = tk.StringVar() + self.batch_type.set('table') + ttk.Radiobutton(master=batchTypeFrame, text='Table', variable=self.batch_type, value='table', command=on_batch_type_select).grid(row=0, column=0, sticky='w', padx=(5, 10)) + ttk.Radiobutton(master=batchTypeFrame, text='File list', variable=self.batch_type, value='filelist', command=on_batch_type_select).grid(row=0, column=1, sticky='w', padx=(5, 10)) + + ttk.Label(self.batch_options_frame,text="Parameter column name").grid(row=0,column=4, sticky='e', padx=5) + self.param_col = tk.StringVar() + self.param_col.set(None) + self.param_col_entry = ttk.Entry(self.batch_options_frame, textvariable=self.param_col, validate='focusout', validatecommand=update_batch_data_read_call) + self.param_col_entry.grid(row=0, column=5, sticky='w', padx=0) + ttk.Label(self.batch_options_frame,text="For batch_type='table' with single parameter column only").grid(row=1,column=4, columnspan=2, sticky='w', padx=5) + + ttk.Label(self.batch_options_frame,text="Batch parameters").grid(row=0,column=6, sticky='e', padx=5) + self.batch_params = tk.StringVar() + self.batch_params.set(None) + self.batch_params_entry = ttk.Entry(self.batch_options_frame, textvariable=self.batch_params, validate='focusout', validatecommand=update_batch_data_read_call, width=75) + self.batch_params_entry.grid(row=0, column=7, columnspan=3, sticky='ew', padx=0) + ttk.Label(self.batch_options_frame,text="To specify parameters used for reading in data").grid(row=1,column=6, columnspan=2, sticky='w', padx=5) + + self.batch_read_data_call = ttk.Label(self.batch_options_frame, text="batch_data_read(input_data, batch_type={}, param_col={}, batch_params={})".format( + self.batch_type.get(), self.param_col.get(), self.batch_params.get() )) + self.batch_read_data_call.grid(row=2,column=0, columnspan=10, sticky='w', padx=10, pady=10) + + self.batch_options_frame.grid(row=11, column=0, columnspan=7, sticky='ew') + self.batch_options_frame.grid_forget() + + separator = 
ttk.Separator(hvsrFrame, orient='horizontal') + separator.grid(row=12, column=0, columnspan=7, sticky='ew', padx=10) + + + def update_fetch_call(): + prevCall = self.input_params_call.cget('text') + if self.trim_dir.get()=='': + trim_dir = None + else: + trim_dir = self.trim_dir.get() + self.data_read = False #New file will not have been read, set to False + self.fetch_data_call.configure(text="fetch_data(params, source='{}', trim_dir={}, export_format='{}', detrend='{}', detrend_order={})" + .format(self.file_source.get(), trim_dir, self.export_format.get(), self.detrend.get(), self.detrend_order.get())) + + newCall = self.input_params_call.cget('text') + if prevCall==newCall: + self.data_read=True + else: + self.data_read = False + #export_format='.mseed' + + def on_obspyFormatSelect(self): + update_fetch_call() + ttk.Label(hvsrFrame, text="Data Format").grid(row=13, column=1, sticky='e', padx=5) + obspyformats = ['AH', 'ALSEP_PSE', 'ALSEP_WTH', 'ALSEP_WTN', 'CSS', 'DMX', 'GCF', 'GSE1', 'GSE2', 'KINEMETRICS_EVT', 'KNET', 'MSEED', 'NNSA_KB_CORE', 'PDAS', 'PICKLE', 'Q', 'REFTEK130', 'RG16', 'SAC', 'SACXY', 'SEG2', 'SEGY', 'SEISAN', 'SH_ASC', 'SLIST', 'SU', 'TSPAIR', 'WAV', 'WIN', 'Y'] + + self.export_format = tk.StringVar(value=obspyformats[11]) + self.data_format_dropdown = ttk.OptionMenu(hvsrFrame, self.export_format, obspyformats[11], *obspyformats, command=on_obspyFormatSelect) + self.data_format_dropdown.grid(row=13, column=2, columnspan=3, sticky='ew') + + #detrend='spline' + + def on_detrend_select(): + try: + str(self.detrend.get()) + update_fetch_call() + return True + except ValueError: + return False + + sourceLabel = ttk.Label(master=hvsrFrame, text="source='raw'") + + ttk.Label(master=hvsrFrame, text='Detrend type [str]').grid(row=14, column=1, sticky='e', padx=5) + detrendFrame= ttk.Frame(hvsrFrame) + detrendFrame.grid(row=14, column=2, sticky='w', columnspan=3) + self.detrend = tk.StringVar() + self.detrend.set('spline') + ttk.Radiobutton(master=detrendFrame, text='Spline', variable=self.detrend, value='spline', command=on_detrend_select).grid(row=0, column=0, sticky='w', padx=(5, 10)) + ttk.Radiobutton(master=detrendFrame, text='Polynomial', variable=self.detrend, value='polynomial', command=on_detrend_select).grid(row=0, column=1, sticky='w', padx=(5, 10)) + ttk.Radiobutton(master=detrendFrame, text='None', variable=self.detrend, value='none', command=on_detrend_select).grid(row=0, column=2, sticky='w', padx=(5, 10)) + + #detrend_order=2 + + def on_detrend_order(): + try: + int(self.detrend_order.get()) + update_fetch_call() + return True + except ValueError: + return False + + ttk.Label(hvsrFrame,text="Detrend Order [int]").grid(row=14,column=5, sticky='e', padx=5, pady=10) + self.detrend_order = tk.IntVar() + self.detrend_order.set(2) + self.detrend_order_entry = ttk.Entry(hvsrFrame, textvariable=self.detrend_order, validate='focusout', validatecommand=on_detrend_order) + self.detrend_order_entry.grid(row=14,column=6, sticky='w', padx=0) + + #trim_dir=False + def on_trim_dir(): + try: + str(self.trim_dir.get()) + update_fetch_call() + return True + except ValueError: + return False + + ttk.Label(hvsrFrame, text="Output Directory (trimmed data)").grid(row=15, column=0, sticky='e', padx=5, pady=(2.5,5)) + self.trim_dir = tk.StringVar() + self.trim_dir_entry = ttk.Entry(hvsrFrame, textvariable=self.trim_dir, validate='focusout', validatecommand=on_trim_dir) + self.trim_dir_entry.grid(row=15, column=1, columnspan=5, sticky='ew', padx=5, pady=(2.5,5)) + + + def 
browse_trim_dir_filepath(): + filepath = filedialog.askdirectory() + if filepath: + self.trim_dir_entry.delete(0, 'end') + self.trim_dir_entry.insert(0, filepath) + + self.trim_dir_filepath_button = ttk.Button(hvsrFrame, text="Browse", command=browse_trim_dir_filepath) + self.trim_dir_filepath_button.grid(row=15, column=6, sticky='ew', padx=0, pady=(2.5,5)) + + #self.starttime, self.endtime = get_times() + input_params_LF = ttk.LabelFrame(master=self.input_tab, text='input_params() call') + self.input_params_call = ttk.Label(master=input_params_LF, text="input_params( input_data='{}', metapath={}, site='{}', instrument='{}',\n\tnetwork='{}', station='{}', loc='{}', channels=[{}, {}, {}], \n\tacq_date='{}', starttime='{}', endtime='{}', tzone='{}', \n\txcoord={}, ycoord={}, elevation={}, input_crs='{}', output_crs='{}', elev_unit='{}', \n\thvsr_band=[{}, {}], peak_freq_range=[{}, {}])".format( + self.data_path.get(), self.meta_path.get(), self.site_name.get(), self.instrumentSel.get(), + self.network.get(), self.station.get(), self.location.get(), + self.z_channel.get(), self.e_channel.get(), self.n_channel.get(), + self.acq_date, self.starttime.time(), self.endtime.time(), self.tz, + self.x.get(), self.y.get(), self.z.get(), + self.input_crs.get(), self.output_crs.get(), self.elev_unit.get(), + self.hvsrBand_min.get(), self.hvsrBand_max.get(), + self.peakFreqRange_min.get(), self.peakFreqRange_max.get())) + self.input_params_call.pack(anchor='w', expand=True, padx=20) + + #fetch_data() call + fetch_data_LF = ttk.LabelFrame(master=self.input_tab, text='fetch_data() call') + self.fetch_data_call = ttk.Label(master=fetch_data_LF, text="fetch_data(params, source={}, trim_dir={}, export_format={}, detrend={}, detrend_order={})" + .format(self.file_source.get(), None, self.export_format.get(), self.detrend.get(), self.detrend_order.get())) + self.fetch_data_call.pack(anchor='w', expand=True, padx=20) + + #Set up frame for reading and running + runFrame_hvsr = ttk.Frame(self.input_tab) + runFrame_hvsr.columnconfigure(0, weight=1) + + self.inputProgBar = ttk.Progressbar(runFrame_hvsr, orient='horizontal') + self.inputProgBar.grid(row=0, column=0, sticky='ew')#.pack(fill='both',expand=True, side='left', anchor='sw') + + self.style.configure(style='Custom.TButton', background='#d49949') + self.read_button = ttk.Button(runFrame_hvsr, text="Read Data", command=read_data, width=30, style='Custom.TButton') + + self.style.configure('Run.TButton', background='#8b9685', width=10, height=3) + self.run_button = ttk.Button(runFrame_hvsr, text="Run", style='Run.TButton', command=process_data) + self.run_button.grid(row=0, column=2, sticky='nsew', padx=2.5)#.pack(side='right', anchor='se', padx=(10,0)) + self.read_button.grid(row=0, column=1, sticky='nsew', padx=2.5)#.pack(side='right', anchor='se') + + hvsrFrame.pack(fill='both', expand=True, side='top')#.grid(row=0, sticky="nsew") + runFrame_hvsr.pack(fill='both', side='bottom') + fetch_data_LF.pack(fill='x', side='bottom') + input_params_LF.pack(fill='x', side='bottom') + self.input_tab.pack(fill='both', expand=True) + self.tab_control.add(self.input_tab, text="Input") + + #Data Preview Tab + self.preview_data_tab = ttk.Frame(self.tab_control) + + # Pack the Data Preview tab so it expands to fill the window + self.preview_data_tab.pack(expand=1) + + self.inputdataFrame = ttk.LabelFrame(self.preview_data_tab, text="Input Data Viewer") + self.inputdataFrame.pack(expand=True, fill='both') + + self.inputInfoFrame = 
ttk.LabelFrame(self.inputdataFrame, text="Input Data Info") + self.input_data_label = ttk.Label(self.inputInfoFrame, text=self.data_filepath_entry.get()) + self.input_data_label.pack(anchor='w', fill='both', expand=True, padx=15) + self.inputInfoFrame.pack(expand=True, fill='both', side='top') + + self.inputDataViewFrame = ttk.LabelFrame(self.inputdataFrame, text="Input Data Plot") + + ttk.Label(master=self.inputInfoFrame, text=self.data_filepath_entry.get()).pack()#.grid(row=0, column=0) + + #Set up plot + #Reset axes, figure, and canvas widget + self.fig_pre = plt.figure() + + prev_mosaic = [['Z'],['N'],['E']] + self.ax_pre = self.fig_pre.subplot_mosaic(prev_mosaic, sharex=True) + + self.canvas_pre = FigureCanvasTkAgg(self.fig_pre, master=self.inputDataViewFrame) # A tk.DrawingArea. + self.canvas_pre.draw() + self.canvasPreWidget = self.canvas_pre.get_tk_widget()#.pack(side=tk.TOP, fill=tk.BOTH, expand=1) + self.preview_toolbar = NavigationToolbar2Tk(self.canvas_pre, self.inputDataViewFrame, pack_toolbar=False) + self.preview_toolbar.update() + + #self.canvas_pre.mpl_connect("button_release_event", select_windows) + + #Save preview figure + savePrevFigFrame = ttk.Frame(master=self.inputDataViewFrame) + + ttk.Label(savePrevFigFrame, text="Export Figure").grid(row=0, column=0, sticky='ew', padx=5) + self.previewFig_dir = tk.StringVar() + self.previewFig_dir_entry = ttk.Entry(savePrevFigFrame, textvariable=self.previewFig_dir) + self.previewFig_dir_entry.grid(row=0, column=1, columnspan=5, sticky='ew') + + + def filepath_preview_fig(): + filepath = filedialog.asksaveasfilename(defaultextension='.png', initialdir=pathlib.Path(self.data_path.get()).parent) + if filepath: + self.previewFig_dir_entry.delete(0, 'end') + self.previewFig_dir_entry.insert(0, filepath) + + + def save_preview_fig(): + self.fig_pre.savefig(self.previewFig_dir.get()) + + self.browsePreviewFig = ttk.Button(savePrevFigFrame, text="Browse", command=filepath_preview_fig) + self.browsePreviewFig.grid(row=0, column=7, sticky='ew', padx=2.5) + + self.savePreviewFig = ttk.Button(savePrevFigFrame, text="Save", command=save_preview_fig) + self.savePreviewFig.grid(row=0, column=8, columnspan=2, sticky='ew', padx=2.5) + + savePrevFigFrame.columnconfigure(1, weight=1) + + savePrevFigFrame.pack(side='bottom', fill='both', expand=False) + self.preview_toolbar.pack(side=tk.BOTTOM, fill=tk.X) + self.canvasPreWidget.pack(fill='both', expand=True)#.grid(row=0, column=0, sticky='nsew') + + self.inputDataViewFrame.pack(expand=True, fill='both', side='bottom') + + #preview-Run button + runFrame_dataPrev = ttk.Frame(self.preview_data_tab) + runFrame_dataPrev.columnconfigure(0, weight=1) + + self.prevProgBar = ttk.Progressbar(runFrame_dataPrev, orient='horizontal') + self.prevProgBar.grid(row=0, column=0, sticky='ew')#.pack(fill='both',expand=True, side='left', anchor='sw') + + self.run_button = ttk.Button(runFrame_dataPrev, text="Run", style='Run.TButton', command=process_data) + self.run_button.grid(row=0, column=1, sticky='nsew', padx=2.5)#.pack(side='bottom', anchor='e')#.grid(row=2, column=9, columnspan=20, sticky='e') + runFrame_dataPrev.pack(side='bottom', anchor='e', fill='both')#grid(row=1, sticky='e') + + self.tab_control.add(self.preview_data_tab, text="Data Preview") + + # NOISE TAB + self.noise_tab = ttk.Frame(self.tab_control) + self.canvasFrame_noise = ttk.LabelFrame(self.noise_tab, text='Noise Viewer') + + #Helper function for updating the canvas and drawing/deleting the boxes + + def __draw_windows(event, pathlist, ax_key, 
windowDrawn, winArtist, xWindows, fig, ax): + """Helper function for updating the canvas and drawing/deleting the boxes""" + for i, pa in enumerate(pathlist): + for j, p in enumerate(pa): + if windowDrawn[i][j]: + pass + else: + patch = matplotlib.patches.PathPatch(p, facecolor='k', alpha=0.75) + winArt = ax[ax_key].add_patch(patch) + windowDrawn[i][j] = True + winArtist[i][j] = winArt + + if event.button is MouseButton.RIGHT: + fig.canvas.draw() + + #Helper function for manual window selection + + def __draw_boxes(event, clickNo, xWindows, pathList, windowDrawn, winArtist, lineArtist, x0, fig, ax): + """Helper function for manual window selection to draw boxes to show where windows have been selected for removal""" + #Create an axis dictionary if it does not already exist so all functions are the same + + if isinstance(ax, np.ndarray) or isinstance(ax, dict): + ax = ax + else: + ax = {'a':ax} + + + if len(ax) > 1: + if type(ax) is not dict: + axDict = {} + for i, a in enumerate(ax): + axDict[str(i)] = a + ax = axDict + #else: + # ax = {'a':ax} + + #if event.inaxes!=ax: return + #y0, y1 = ax.get_ylim() + y0 = [] + y1 = [] + kList = [] + for k in ax.keys(): + kList.append(k) + y0.append(ax[k].get_ylim()[0]) + y1.append(ax[k].get_ylim()[1]) + #else: + # y0 = [ax.get_ylim()[0]] + # y1 = [ax.get_ylim()[1]] + + if self.clickNo == 0: + #y = np.linspace(ax.get_ylim()[0], ax.get_ylim()[1], 2) + self.x0 = event.xdata + self.clickNo = 1 + self.lineArtist.append([]) + winNums = len(self.xWindows) + for i, k in enumerate(ax.keys()): + linArt = ax[k].axvline(self.x0, 0, 1, color='k', linewidth=1, zorder=100) + self.lineArtist[winNums].append([linArt, linArt]) + #else: + # linArt = plt.axvline(self.x0, y0[i], y1[i], color='k', linewidth=1, zorder=100) + # self.lineArtist.append([linArt, linArt]) + else: + x1 = event.xdata + self.clickNo = 0 + + windowDrawn.append([]) + winArtist.append([]) + pathList.append([]) + winNums = len(self.xWindows) + for i, key in enumerate(kList): + path_data = [ + (matplotlib.path.Path.MOVETO, (self.x0, y0[i])), + (matplotlib.path.Path.LINETO, (x1, y0[i])), + (matplotlib.path.Path.LINETO, (x1, y1[i])), + (matplotlib.path.Path.LINETO, (self.x0, y1[i])), + (matplotlib.path.Path.LINETO, (self.x0, y0[i])), + (matplotlib.path.Path.CLOSEPOLY, (self.x0, y0[i])), + ] + codes, verts = zip(*path_data) + path = matplotlib.path.Path(verts, codes) + + windowDrawn[winNums].append(False) + winArtist[winNums].append(None) + + pathList[winNums].append(path) + __draw_windows(event=event, pathlist=pathList, ax_key=key, windowDrawn=windowDrawn, winArtist=winArtist, xWindows=self.xWindows, fig=fig, ax=ax) + linArt = ax[key].axvline(x1, 0, 1, color='k', linewidth=0.5, zorder=100) + + self.lineArtist[winNums][i].pop(-1) + self.lineArtist[winNums][i].append(linArt) + x_win = [self.x0, x1] + x_win.sort() #Make sure they are in the right order + self.xWindows.append(x_win) + fig.canvas.draw() + return self.clickNo, self.x0 + + #Helper function for manual window selection to draw boxes to deselect windows for removal + + def __remove_on_right(event, xWindows, pathList, windowDrawn, winArtist, lineArtist, fig, ax): + """Helper function for manual window selection to draw boxes to deselect windows for removal""" + + if self.xWindows is not None: + for i, xWins in enumerate(self.xWindows): + if event.xdata > xWins[0] and event.xdata < xWins[1]: + linArtists = self.lineArtist[i] + pathList.pop(i) + for j, a in enumerate(linArtists): + winArtist[i][j].remove()#.pop(i) + 
self.lineArtist[i][j][0].remove()#.pop(i)#[i].pop(j) + self.lineArtist[i][j][1].remove() + windowDrawn.pop(i) + self.lineArtist.pop(i)#[i].pop(j) + winArtist.pop(i)#[i].pop(j) + self.xWindows.pop(i) + fig.canvas.draw() + + + def select_windows(event, input=None, initialize=False): + """Function to manually select windows for exclusion from data. + + Parameters + ---------- + input : dict + Dictionary containing all the hvsr information. + + Returns + ------- + self.xWindows : list + List of two-item lists containing start and end times of windows to be removed. + """ + from matplotlib.backend_bases import MouseButton + import matplotlib.pyplot as plt + + #self.fig_noise, self.ax_noise = sprit_hvsr._plot_specgram_stream(stream=input['stream'], params=input, fig=self.fig_noise, ax=self.ax_noise, component='Z', stack_type='linear', detrend='mean', fill_gaps=0, dbscale=True, return_fig=True, cmap_per=[0.1,0.9]) + #self.fig_noise.canvas.draw() + + #if 'stream' in input.keys(): + # self.fig_noise, self.ax_noise = sprit_hvsr._plot_specgram_stream(stream=self.params['stream'], params=self.params, fig=self.fig_noise, ax=self.ax_noise, component='Z', stack_type='linear', detrend='mean', fill_gaps=0, dbscale=True, return_fig=True, cmap_per=[0.1,0.9]) + #else: + # params = input.copy() + # input = input['stream'] + + #if isinstance(input, obspy.core.stream.Stream): + # fig, ax = sprit_hvsr._plot_specgram_stream(input, component=['Z']) + #elif isinstance(input, obspy.core.trace.Trace): + # fig, ax = sprit_hvsr._plot_specgram_stream(input) + if initialize: + self.lineArtist = [] + self.winArtist = [] + self.windowDrawn = [] + self.pathList = [] + self.xWindows = [] + self.x0 = 0 + self.clickNo = 0 + + if not initialize: + __on_click(event) + self.hvsr_data['xwindows_out'] = self.xWindows + + + #self.fig_closed + #fig_closed = False + #while fig_closed is False: + # #fig.canvas.mpl_connect('button_press_event', __on_click)#(self.clickNo, self.xWindows, pathList, windowDrawn, winArtist, lineArtist, self.x0, fig, ax)) + # fig.canvas.mpl_connect('close_event', _on_fig_close)#(self.clickNo, self.xWindows, pathList, windowDrawn, winArtist, lineArtist, self.x0, fig, ax)) + # plt.pause(0.5) + + #output['xwindows_out'] = self.xWindows + #output['fig'] = fig + #output['ax'] = ax + noEvent = True + return self.hvsr_data + + #Support function to help select_windows run properly + + def _on_fig_close(event): + #self.fig_closed + fig_closed = True + return + + + def __on_click(event): + + if event.button is MouseButton.RIGHT: + __remove_on_right(event, self.xWindows, self.pathList, self.windowDrawn, self.winArtist, self.lineArtist, self.fig_noise, self.ax_noise) + + if event.button is MouseButton.LEFT: + self.clickNo, self.x0 = __draw_boxes(event, self.clickNo, self.xWindows, self.pathList, self.windowDrawn, self.winArtist, self.lineArtist, self.x0, self.fig_noise, self.ax_noise) + + #if 'hvsr_data' not in dir(self): + # self.hvsr_data = {'placeholder':None} + + def plot_noise_windows(hvsr_data={'placeholder':None}, initial_setup=False): + if 'hvsr_data' in dir(self): + hvsr_data = self.hvsr_data + + if isinstance(hvsr_data, sprit_hvsr.HVSRBatch): + batch_data = hvsr_data.copy() + hvsr_data = hvsr_data[list(hvsr_data.keys())[0]] + else: + batch_data = None + + if initial_setup: + self.xWindows=[] + else: + #Clear everything + for key in self.ax_noise: + self.ax_noise[key].clear() + self.fig_noise.clear() + + #Really make sure it's out of memory + self.fig_noise = [] + self.ax_noise = [] + try: + 
self.fig_noise.get_children() + except: + pass + try: + self.ax_noise.get_children() + except: + pass + + #Reset axes, figure, and canvas widget + self.fig_noise = plt.figure() + + noise_mosaic = [['spec'],['spec'],['spec'], + ['spec'],['spec'],['spec'], + ['signalz'],['signalz'], ['signaln'], ['signale']] + self.ax_noise = self.fig_noise.subplot_mosaic(noise_mosaic, sharex=True) + + + if not initial_setup: + self.noise_canvasWidget.destroy() + self.noise_toolbar.destroy() + self.fig_noise = sprit_hvsr._plot_specgram_stream(stream=hvsr_data['stream'], params=hvsr_data, fig=self.fig_noise, ax=self.ax_noise, component='Z', stack_type='linear', detrend='mean', fill_gaps=0, dbscale=True, return_fig=True, cmap_per=[0.1,0.9]) + + self.noise_canvas = FigureCanvasTkAgg(self.fig_noise, master=self.canvasFrame_noise) # A tk.DrawingArea. + self.noise_canvas.draw() + #self.noise_canvasWidget = self.noise_canvas.get_tk_widget()#.pack(side=tk.TOP, fill=tk.BOTH, expand=1) + # pack_toolbar=False will make it easier to use a layout manager later on. + self.noise_toolbar = NavigationToolbar2Tk(self.noise_canvas, self.canvasFrame_noise, pack_toolbar=False) + self.noise_toolbar.update() + + self.noise_canvasWidget = self.noise_canvas.get_tk_widget()#.pack(side=tk.TOP, fill=tk.BOTH, expand=1) + self.noise_canvas.mpl_connect("button_release_event", select_windows) + + self.noise_toolbar.pack(side=tk.BOTTOM, fill=tk.X) + self.noise_canvasWidget.pack(fill='both')#.grid(row=0, column=0, sticky='nsew') + + if not initial_setup: + if batch_data is None: + hvsr_data = {'SITENAME':hvsr_data} + else: + hvsr_data = batch_data + + for i, (k, hv_data) in enumerate(hvsr_data.items()): + #Reset edited data every time plot_noise_windows is run + #v_data['stream'] = hv_data['input_stream'].copy() + #Set initial input + #input = hv_data#['input_stream'] + + if self.do_stalta.get(): + hv_data = sprit_hvsr.remove_noise(hvsr_data=hv_data, remove_method='stalta', sta=self.sta.get(), lta=self.lta.get(), stalta_thresh=[self.stalta_thresh_low.get(), self.stalta_thresh_hi.get()]) + + if self.do_pctThresh.get(): + hv_data = sprit_hvsr.remove_noise(hvsr_data=hv_data, remove_method='saturation', sat_percent=self.pct.get()) + + if self.do_noiseWin.get(): + hv_data = sprit_hvsr.remove_noise(hvsr_data=hv_data, remove_method='noise', noise_percent=self.noise_amp_pct.get(), lta=self.lta_noise.get(), min_win_size=self.win_size_thresh.get()) + + if self.do_warmup.get(): + hv_data = sprit_hvsr.remove_noise(hvsr_data=hv_data, remove_method='warmup', warmup_time=self.warmup_time.get(), cooldown_time=self.cooldown_time.get()) + + if i==0: + self.fig_noise, self.ax_noise, self.noise_windows_line_artists, self.noise_windows_window_artists = sprit_hvsr._get_removed_windows(input=hv_data, fig=self.fig_noise, ax=self.ax_noise, existing_xWindows=self.xWindows, time_type='matplotlib') + self.fig_noise.canvas.draw() + + if batch_data is None: + hvsr_data = hvsr_data['SITENAME'] + + return hvsr_data + + self.fig_noise.canvas.draw() + return + + plot_noise_windows({'placeholder':None}, initial_setup=True) + self.canvasFrame_noise.pack(fill='both')#.grid(row=0, column=0, sticky="nsew") + + #noise_mosaic = [['spec'],['spec'],['spec'], + # ['spec'],['spec'],['spec'], + # ['signalz'],['signalz'], ['signaln'], ['signale']] + #self.fig_noise, self.ax_noise = plt.subplot_mosaic(noise_mosaic, sharex=True) + #self.canvasFrame_noise = ttk.LabelFrame(self.noise_tab, text='Noise Viewer') + #self.canvasFrame_noise.pack(fill='both')#.grid(row=0, column=0, sticky="nsew") + 
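+ # The commented-out lines here preserve older wiring for the same Matplotlib-in-Tk embed
+ # that plot_noise_windows now performs. For reference, a minimal standalone sketch of the
+ # FigureCanvasTkAgg + NavigationToolbar2Tk pattern (assumed demo_* names, not sprit's):
+ import tkinter as tk
+ from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
+ from matplotlib.figure import Figure
+
+ demo_win = tk.Tk()
+ demo_fig = Figure()
+ demo_fig.add_subplot(111).plot([0, 1, 2], [0, 1, 0])
+ demo_canvas = FigureCanvasTkAgg(demo_fig, master=demo_win)  # a Tk drawing area
+ demo_canvas.draw()
+ # pack_toolbar=False lets the caller manage toolbar geometry, as the noise tab does.
+ demo_toolbar = NavigationToolbar2Tk(demo_canvas, demo_win, pack_toolbar=False)
+ demo_toolbar.update()
+ demo_toolbar.pack(side=tk.BOTTOM, fill=tk.X)
+ demo_canvas.get_tk_widget().pack(fill='both', expand=True)
+ # demo_win.mainloop()  # uncomment to run the sketch on its own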
+ #self.noise_canvas = FigureCanvasTkAgg(self.fig_noise, master=self.canvasFrame_noise) + #self.noise_canvas.draw() + #self.noise_canvasWidget = self.noise_canvas.get_tk_widget()#.pack(side=tk.TOP, fill=tk.BOTH, expand=1) + #self.noise_canvasWidget.pack(fill='both')#.grid(row=0, column=0, sticky='nsew') + + #Run button frame + runFrame_noise = ttk.Frame(self.noise_tab) + runFrame_noise.columnconfigure(0, weight=1) + + #Run area + #Progress Bar + self.noiseProgBar = ttk.Progressbar(runFrame_noise, orient='horizontal') + self.noiseProgBar.grid(row=0, column=0, sticky='ew')#.pack(fill='both',expand=True, side='left', anchor='sw') + + #Update Noise Windows button + self.style.configure(style='Noise.TButton', background='#86a5ba') + self.noise_button = ttk.Button(runFrame_noise, text="Update Noise Windows", command=plot_noise_windows, width=30, style='Noise.TButton') + + self.noise_windows_line_artists = [] + self.noise_windows_window_artists = [] + + self.style.configure('Run.TButton', background='#8b9685', width=10, height=3) + self.run_button = ttk.Button(runFrame_noise, text="Run", style='Run.TButton', command=process_data) + self.noise_button.grid(row=0, column=1, sticky='nsew', padx=2.5)#.pack(side='right', anchor='se') + self.run_button.grid(row=0, column=2, sticky='nsew', padx=2.5)#.pack(side='right', anchor='se', padx=(10,0)) + + runFrame_noise.pack(fill='both',side='bottom', anchor='e') + + + def update_remove_noise_call(): + if 'prevAutoState' not in dir(self): + self.prevAutoState=self.do_auto.get() + + if self.prevAutoState and not self.do_auto.get(): + self.do_stalta.set(False) + self.do_pctThresh.set(False) + self.do_noiseWin.set(False) + self.do_warmup.set(False) + #Get method + remMethDict = {'auto':self.do_auto.get(), + 'stalta':self.do_stalta.get(), + 'sat_per':self.do_pctThresh.get(), + 'noise_per':self.do_noiseWin.get(), + 'warmcool':self.do_warmup.get(), + } + + remMethList = [] + for k, v in remMethDict.items(): + if v: + if k=='auto': + remMethList = ['auto'] + break + remMethList.append(k) + + if len(remMethList)==1: + remMethList = remMethList[0] + + if remMethList=='auto': + remMethList = 'auto' + self.do_auto.set(True) + set_auto() + + if len(remMethList)==0: + remMethList=None + + self.remove_noise_call.configure(text="remove_noise(hvsr_data, remove_method={}, sat_percent={}, noise_percent={}, sta={}, lta={}, stalta_thresh=[{},{}], warmup_time={}, cooldown_time={}, min_win_size={}, remove_raw_noise={})".format( + remMethList, self.pct.get(), self.noise_amp_pct.get(), self.sta.get(), self.lta.get(), self.stalta_thresh_low.get(), self.stalta_thresh_hi.get(), + self.warmup_time.get(), self.cooldown_time.get(), self.win_size_thresh.get(),self.use_raw_data.get() )) + + self.prevAutoState = self.do_auto.get() + + + #remove_noise Frame + removeNoiseFrame = ttk.LabelFrame(self.noise_tab, text='remove_noise() call') + + self.remove_noise_call = ttk.Label(master=removeNoiseFrame, text="remove_noise(hvsr_data, remove_method='auto',sat_percent=0.995, noise_percent=0.80, sta=2, lta=30, stalta_thresh=[0.5,5], warmup_time=0, cooldown_time=0, min_win_size=1, remove_raw_noise=False)") + self.remove_noise_call.grid(row=0, column=0, padx=5, pady=(0,5)) + removeNoiseFrame.pack(fill='both', side='bottom')#.grid(row=0, column=1, sticky='nsew') + + noiseFrame = ttk.LabelFrame(self.noise_tab, text='Noise Removal') + noiseFrame.pack(fill='both')#.grid(row=1, columnspan=2, sticky='nsew') + + #Options for doing stalta antitrigger for noise removal + stltaremoveFrame = 
ttk.LabelFrame(noiseFrame, text='STA/LTA Antitrigger') + stltaremoveFrame.grid(row=0, column=0, columnspan=1, sticky='nsew') + + self.do_stalta = tk.BooleanVar() + staltaBool = ttk.Checkbutton(master=stltaremoveFrame, text="", variable=self.do_stalta, command=update_remove_noise_call) # create the Checkbutton widget + staltaBool.grid(row=0, column=0, sticky='ew') + + ttk.Label(master=stltaremoveFrame, text="STA [s]").grid(row=0, column=1) + self.sta = tk.DoubleVar() + self.sta.set(5) + staEntry = ttk.Entry(master=stltaremoveFrame, textvariable=self.sta, width=5, validate='focusout', validatecommand=update_remove_noise_call) # create the Entry widget + staEntry.grid(row=0, column=2, sticky='ew', padx=(5,10)) + + ttk.Label(master=stltaremoveFrame, text="LTA [s]").grid(row=0, column=3) + self.lta = tk.DoubleVar() + self.lta.set(30) + ltaEntry = ttk.Entry(master=stltaremoveFrame, textvariable=self.lta, width=5, validate='focusout', validatecommand=update_remove_noise_call) # create the Entry widget + ltaEntry.grid(row=0, column=4, sticky='ew', padx=(5,10)) + + ttk.Label(master=stltaremoveFrame, text="STA/LTA Thresholds (Low, High)").grid(row=0, column=5) + self.stalta_thresh_low = tk.DoubleVar() + self.stalta_thresh_low.set(0.5) + staltaLowEntry = ttk.Entry(master=stltaremoveFrame, textvariable=self.stalta_thresh_low, width=5, validate='focusout', validatecommand=update_remove_noise_call) # create the Entry widget + staltaLowEntry.grid(row=0, column=6, sticky='ew', padx=(5,0)) + + self.stalta_thresh_hi = tk.DoubleVar() + self.stalta_thresh_hi.set(5) + staltaHiEntry = ttk.Entry(master=stltaremoveFrame, textvariable=self.stalta_thresh_hi, width=5, validate='focusout', validatecommand=update_remove_noise_call) # create the Entry widget + staltaHiEntry.grid(row=0, column=7, sticky='ew') + + #Options for Percentage threshold removal + pctThresFrame = ttk.LabelFrame(noiseFrame, text='Percentage Threshold') + pctThresFrame.grid(row=1, column=0, sticky='nsew') + + self.do_pctThresh= tk.BooleanVar() + pctBool = ttk.Checkbutton(master=pctThresFrame, text="", variable=self.do_pctThresh, command=update_remove_noise_call) # create the Checkbutton widget + pctBool.grid(row=0, column=0, sticky='ew') + + ttk.Label(master=pctThresFrame, text="Max Saturation %").grid(row=0, column=1) + self.pct = tk.DoubleVar() + self.pct.set(0.995) + pctEntry = ttk.Entry(master=pctThresFrame, textvariable=self.pct, width=10, validate='focusout', validatecommand=update_remove_noise_call) # create the Entry widget + pctEntry.grid(row=0, column=2, sticky='ew', padx=(5,10)) + + ttk.Label(master=pctThresFrame, text="", width=27).grid(row=0, column=3, columnspan=2) + + #Options for noisy window + noisyWindowFrame = ttk.LabelFrame(noiseFrame, text='Noisy Windows') + noisyWindowFrame.grid(row=2, column=0, sticky='nsew') + + self.do_noiseWin = tk.BooleanVar() + winNoiseBool = ttk.Checkbutton(master=noisyWindowFrame, text="", variable=self.do_noiseWin, command=update_remove_noise_call) # create the Checkbutton widget + winNoiseBool.grid(row=0, column=0, sticky='ew') + + ttk.Label(master=noisyWindowFrame, text="Max Window %").grid(row=0, column=1) + self.noise_amp_pct = tk.DoubleVar() + self.noise_amp_pct.set(0.80) + winamppctEntry = ttk.Entry(master=noisyWindowFrame, textvariable=self.noise_amp_pct, width=10, validate='focusout', validatecommand=update_remove_noise_call) # create the Entry widget + winamppctEntry.grid(row=0, column=2, sticky='ew', padx=(5,10)) + + ttk.Label(master=noisyWindowFrame, text="Window Length 
[sec]").grid(row=0, column=3) + self.lta_noise = tk.DoubleVar() + self.lta_noise.set(30) + winamppctEntry = ttk.Entry(master=noisyWindowFrame, textvariable=self.lta_noise, width=10, validate='focusout', validatecommand=update_remove_noise_call) # create the Entry widget + winamppctEntry.grid(row=0, column=4, sticky='ew', padx=(5,10)) + + ttk.Label(master=noisyWindowFrame, text="Min. Window Size [sec]").grid(row=0, column=5) + self.win_size_thresh = tk.DoubleVar() + self.win_size_thresh.set(0) + win_size_Entry = ttk.Entry(master=noisyWindowFrame, textvariable=self.win_size_thresh, width=10, validate='focusout', validatecommand=update_remove_noise_call) # create the Entry widget + win_size_Entry.grid(row=0, column=6, sticky='e', padx=(5,10)) + + #Options for warmup + warmupFrame = ttk.LabelFrame(noiseFrame, text='Warmup & Cooldown Time') + warmupFrame.grid(row=0, column=1, sticky='nsew') + + self.do_warmup= tk.BooleanVar() + warmupBool = ttk.Checkbutton(master=warmupFrame, text="", variable=self.do_warmup, command=update_remove_noise_call) # create the Checkbutton widget + warmupBool.grid(row=0, column=0, sticky='ew') + + ttk.Label(master=warmupFrame, text="Warmup time [s]").grid(row=0, column=1) + self.warmup_time = tk.DoubleVar() + warmupEntry = ttk.Entry(master=warmupFrame, textvariable=self.warmup_time, width=10, validate='focusout', validatecommand=update_remove_noise_call) # create the Entry widget + warmupEntry.grid(row=0, column=2, sticky='ew', padx=(5,10)) + warmupEntry.delete(0, 'end') + warmupEntry.insert(0, '0') + + ttk.Label(master=warmupFrame, text="Cooldown Time [s]").grid(row=0, column=3) + self.cooldown_time = tk.DoubleVar() + cooldownEntry = ttk.Entry(master=warmupFrame, textvariable=self.cooldown_time, width=10, validate='focusout', validatecommand=update_remove_noise_call) # create the Entry widget + cooldownEntry.grid(row=0, column=5, sticky='ew', padx=(5,10)) + cooldownEntry.delete(0, 'end') + cooldownEntry.insert(0, '0') + + #Options for doing stdev noise removal + stdremoveFrame = ttk.LabelFrame(noiseFrame, text='Standard Deviation Antitrigger (not yet implemented)') + stdremoveFrame.grid(row=1, column=1, columnspan=1, sticky='nsew') + + self.do_stdev = tk.BooleanVar() + stdBool = ttk.Checkbutton(master=stdremoveFrame, text="", variable=self.do_stdev, state='disabled', command=update_remove_noise_call) # create the Checkbutton widget + stdBool.grid(row=0, column=0, sticky='ew') + + ttk.Label(master=stdremoveFrame, text="Std Deviation Ratio (moving stdev/total stdev)").grid(row=0, column=1) + self.stdRatio = tk.DoubleVar() + stdRatEntry = ttk.Entry(master=stdremoveFrame, textvariable=self.stdRatio, width=5, state='disabled', validate='focusout', validatecommand=update_remove_noise_call) # create the Entry widget + stdRatEntry.grid(row=0, column=2, sticky='ew', padx=(5,10)) + stdRatEntry.delete(0, 'end') + stdRatEntry.insert(0, '1') + + ttk.Label(master=stdremoveFrame, text="Window Length [s]").grid(row=0, column=3) + self.stdWinLen = tk.DoubleVar() + stdWinLenEntry = ttk.Entry(master=stdremoveFrame, textvariable=self.stdWinLen, width=5, state='disabled',validate='focusout', validatecommand=update_remove_noise_call) # create the Entry widget + stdWinLenEntry.grid(row=0, column=4, sticky='ew', padx=(5,10)) + stdWinLenEntry.delete(0, 'end') + stdWinLenEntry.insert(0, '5') + + #Quick set the auto + #autoFrame = ttk.LabelFrame(noiseFrame, text='Auto Run') + #autoFrame.grid(row=2, column=1, columnspan=1, sticky='nsew') + + + def set_auto(): + if self.do_auto.get(): + 
self.do_stalta.set(True) + self.do_stdev.set(True) + self.do_warmup.set(True) + self.do_noiseWin.set(True) + self.do_pctThresh.set(True) + else: + self.do_stalta.set(False) + self.do_stdev.set(False) + self.do_warmup.set(False) + self.do_noiseWin.set(False) + self.do_pctThresh.set(False) + pass + + + #Additional options + addOptionsFrame = ttk.LabelFrame(noiseFrame, text='') + addOptionsFrame.grid(row=2, column=1, columnspan=1, sticky='nsew') + + + self.do_auto= tk.BooleanVar() + autoBool = ttk.Checkbutton(master=addOptionsFrame, text="Set Auto Run", variable=self.do_auto, command=update_remove_noise_call) # create the Checkbutton widget + autoBool.grid(row=0, column=0, sticky='nsew', padx=5) + + self.use_raw_data= tk.BooleanVar() + rawDataRemoveBool = ttk.Checkbutton(master=addOptionsFrame, text="Remove from raw data", variable=self.use_raw_data, command=update_remove_noise_call) # create the Checkbutton widget + rawDataRemoveBool.grid(row=0, column=1, sticky='nsew', padx=5) + + #Export noise windows + ttk.Label(noiseFrame, text="Export Figure").grid(row=4, column=0, sticky='ew', padx=5) + self.results_noise_dir = tk.StringVar() + self.results_noise_dir_entry = ttk.Entry(noiseFrame, textvariable=self.results_noise_dir) + self.results_noise_dir_entry.grid(row=4, column=0, columnspan=5, sticky='ew', padx=(100,5)) + + + def filepath_noise_fig(): + filepath = filedialog.asksaveasfilename(defaultextension='png', initialdir=pathlib.Path(self.data_path.get()).parent, initialfile=self.params['site']+'_noisewindows.png') + if filepath: + self.results_noise_dir_entry.delete(4, 'end') + self.results_noise_dir_entry.insert(4, filepath) + + + def save_noise_fig(): + self.fig_noise.savefig(self.results_noise_dir.get()) + + self.browse_noise_fig = ttk.Button(noiseFrame, text="Browse",command=filepath_noise_fig) + self.browse_noise_fig.grid(row=4, column=7, sticky='ew', padx=2.5) + + self.save_noise_fig = ttk.Button(noiseFrame, text="Save",command=save_noise_fig) + self.save_noise_fig.grid(row=4, column=8, columnspan=2, sticky='ew', padx=2.5) + + self.noise_tab.pack(expand=1) + self.tab_control.add(self.noise_tab, text="Noise") + + # SETTINGS TAB + self.settings_tab = ttk.Frame(self.tab_control) + + self.tab_control.add(self.settings_tab, text="Settings") + + # Create a new Notebook widget within the Settings tab + settings_notebook = ttk.Notebook(self.settings_tab) + + # Create the tabs within the Settings tab + #PPSD SETTINGS SUBTAB + ppsd_settings_tab = ttk.Frame(settings_notebook) + ppsdSettingsFrame = ttk.LabelFrame(ppsd_settings_tab, text='Input Settings')#.pack(fill='both') + ppsdParamsFrame = ttk.LabelFrame(ppsd_settings_tab, text='PPSD Parameters')#.pack(fill='both') + + # ppsd_length=30.0 + + def on_ppsd_length(): + try: + float(self.ppsd_length.get()) + ppsdLenLabel.configure(text='ppsd_length={}'.format(self.ppsd_length.get())) + update_ppsd_call(self.ppsd_call) + return True + except ValueError: + return False + ppsdLenLabel = ttk.Label(master=ppsdParamsFrame, text='ppsd_length=30.0 ')#.grid(row=0, column=0) + ppsdLenLabel.grid(row=0, column=0, sticky='w', pady=(6,6), padx=5) + + ttk.Label(master=ppsdSettingsFrame, text='PPSD Length (in seconds) [float]').grid(row=0, column=0, sticky='w', padx=5) + self.ppsd_length = tk.DoubleVar() + self.ppsd_length.set(30) + ppsdLenEntry = ttk.Entry(master=ppsdSettingsFrame, textvariable=self.ppsd_length, width=10, validate='focusout', validatecommand=on_ppsd_length) + ppsdLenEntry.grid(row=0, column=1, sticky='w', padx=(5, 10)) + + # overlap=0.5, + + def 
on_overlap(): + try: + overlap = float(self.overlap.get()) + if overlap > 1: + self.overlap.set(overlap/100) + overlapLabel.configure(text='overlap={}'.format(self.overlap.get())) + update_ppsd_call(self.ppsd_call) + return True + except ValueError: + return False + overlapLabel = ttk.Label(master=ppsdParamsFrame, text='overlap=0.5 ')#.grid(row=0, column=0) + overlapLabel.grid(row=1, column=0, sticky='ew', pady=(6,6), padx=5) + + ttk.Label(master=ppsdSettingsFrame, text='Overlap % (0-1) [float]').grid(row=1, column=0, sticky='w', padx=5) + self.overlap = tk.DoubleVar() + self.overlap.set(0.5) + overlapEntry = ttk.Entry(master=ppsdSettingsFrame, textvariable=self.overlap, width=10, validate='focusout', validatecommand=on_overlap) + overlapEntry.grid(row=1, column=1, sticky='w', padx=(5, 10)) + + # period_step_octaves=0.0625, + + def on_per_step_oct(): + try: + float(self.perStepOct.get()) + + pStepOctLabel.configure(text='period_step_octaves={}'.format(self.perStepOct.get())) + update_ppsd_call(self.ppsd_call) + return True + except ValueError: + return False + pStepOctLabel = ttk.Label(master=ppsdParamsFrame, text='period_step_octaves=0.0625')#.grid(row=0, column=0) + pStepOctLabel.grid(row=2, column=0, sticky='ew', pady=(6,6), padx=5) + + ttk.Label(master=ppsdSettingsFrame, text='Period Step Octave [float]').grid(row=2, column=0, sticky='w', padx=5) + self.perStepOct = tk.DoubleVar() + self.perStepOct.set(0.0625) + pStepOctEntry = ttk.Entry(master=ppsdSettingsFrame, textvariable=self.perStepOct, width=10, validate='focusout', validatecommand=on_per_step_oct) + pStepOctEntry.grid(row=2, column=1, sticky='w', padx=(5, 10)) + + #skip_on_gaps + + def show_sog(): + if self.skip_on_gaps.get(): + sogLabel.configure(text ='skip_on_gaps=True') + else: + sogLabel.configure(text ='skip_on_gaps=False') + update_ppsd_call(self.ppsd_call) + + self.skip_on_gaps = tk.BooleanVar() + ttk.Label(master=ppsdSettingsFrame, text='Skip on Gaps [bool]: ', justify='left').grid(row=3, column=0, sticky='w', padx=5) + sogCheckButton = ttk.Checkbutton(master=ppsdSettingsFrame, text='', variable=self.skip_on_gaps, command=show_sog) # create the Entry widget + sogCheckButton.grid(row=3, column=1, sticky='ew', padx=(5,10)) + sogLabel = ttk.Label(master=ppsdParamsFrame, text='skip_on_gaps=False') + sogLabel.grid(row=3, column=0, sticky='ew', pady=(6,6), padx=5) + + # db_bins=(-200, -50, 1.0), + + def show_dbbins(): + try: + float(minDB.get()) + float(maxDB.get()) + float(dB_step.get()) + dbbinsLabel.configure(text='db_bins=({}, {}, {})'.format( + minDB.get(), maxDB.get(), dB_step.get())) + self.db_bins = (minDB.get(), maxDB.get(), dB_step.get()) + update_ppsd_call(self.ppsd_call) + return True + except ValueError: + return False + + dbbinsLabel = ttk.Label(master=ppsdParamsFrame, + text='db_bins=(-200, -50, 1.0)') + dbbinsLabel.grid(row=4, column=0, sticky='ew', pady=(6,6), padx=5) + ttk.Label(master=ppsdSettingsFrame, text='dB Bins (Y Axis) [tuple]', justify='left').grid(row=4, column=0, sticky='w', padx=5) + + ttk.Label(master=ppsdSettingsFrame, text='Min. dB').grid(row=4, column=1, sticky='e', padx=5) + minDB = tk.DoubleVar() + minDB.set(-200) + minDBEntry = ttk.Entry(master=ppsdSettingsFrame, textvariable=minDB, + validate="focusout", validatecommand=show_dbbins, width=10) + minDBEntry.grid(row=4, column=2, sticky='w', padx=(5, 10)) + + ttk.Label(master=ppsdSettingsFrame, text='Max. 
dB').grid(row=4, column=3, sticky='e', padx=5) + maxDB = tk.DoubleVar() + maxDB.set(-50) + maxDBEntry = ttk.Entry(master=ppsdSettingsFrame, textvariable=maxDB, + validate="focusout", validatecommand=show_dbbins, width=10) + maxDBEntry.grid(row=4, column=4, sticky='w', padx=(5, 10)) + + ttk.Label(master=ppsdSettingsFrame, text='dB Step').grid(row=4, column=5, sticky='e', padx=5) + dB_step = tk.DoubleVar() + dB_step.set(1.0) + stepEntry = ttk.Entry(master=ppsdSettingsFrame, textvariable=dB_step, + validate="focusout", validatecommand=(show_dbbins), width=10) + stepEntry.grid(row=4, column=6, sticky='w', padx=(5, 10)) + self.db_bins = (minDB.get(), maxDB.get(), dB_step.get()) + + # period_limits=None, + + def show_per_lims(): + try: + if minPerLim.get() == 'None': + pass + else: + float(minPerLim.get()) + + if maxPerLim.get() == 'None': + pass + else: + float(maxPerLim.get()) + + if minPerLim.get() == 'None' or maxPerLim.get() == 'None': + perLimsLabel.configure(text='period_limits=None') + else: + perLimsLabel.configure(text='period_limits=[{}, {}]'.format(minPerLim.get(), maxPerLim.get())) + self.period_limits = [float(minPerLim.get()), float(maxPerLim.get())] + update_ppsd_call(self.ppsd_call) + return True + except ValueError: + return False + + perLimsLabel = ttk.Label(master=ppsdParamsFrame, + text='period_limits=None') + perLimsLabel.grid(row=5, column=0, sticky='ew', pady=(6,6), padx=5) + ttk.Label(master=ppsdSettingsFrame, text='Period Limits [list of floats or None]', justify='left').grid(row=5, column=0, sticky='w', padx=5) + + ttk.Label(master=ppsdSettingsFrame, text='Min. Period Limit').grid(row=5, column=1, sticky='e', padx=5) + minPerLim = tk.StringVar() + minPerLim.set(None) + minPerLimEntry = ttk.Entry(master=ppsdSettingsFrame, textvariable=minPerLim, + validate="focusout", validatecommand=(show_per_lims), width=10) + minPerLimEntry.grid(row=5, column=2, sticky='w', padx=(5, 10)) + + ttk.Label(master=ppsdSettingsFrame, text='Max. 
Period Limit').grid(row=5, column=3, sticky='e', padx=5) + maxPerLim = tk.StringVar() + maxPerLim.set(None) + maxPerLimEntry = ttk.Entry(master=ppsdSettingsFrame, textvariable=maxPerLim, + validate="focusout", validatecommand=(show_per_lims), width=10) + maxPerLimEntry.grid(row=5, column=4, sticky='w', padx=(5, 10)) + + if minPerLim.get() == 'None' or maxPerLim.get() == 'None': + self.period_limits = None + else: + self.period_limits = [float(minPerLim.get()), float(maxPerLim.get())] + + # period_smoothing_width_octaves=1.0, + + def on_per_smoothwidth_oct(): + try: + float(self.perSmoothWidthOct.get()) + + pSmoothWidthLabel.configure(text='period_smoothing_width_octaves={}'.format(self.perSmoothWidthOct.get())) + update_ppsd_call(self.ppsd_call) + return True + except ValueError: + return False + pSmoothWidthLabel = ttk.Label(master=ppsdParamsFrame, text='period_smoothing_width_octaves=1.0')#.grid(row=0, column=0) + pSmoothWidthLabel.grid(row=6, column=0, sticky='ew', pady=(6,6), padx=5) + + ttk.Label(master=ppsdSettingsFrame, text='Period Smoothing Width (octaves) [float]').grid(row=6, column=0, sticky='w', padx=5) + self.perSmoothWidthOct = tk.DoubleVar() + self.perSmoothWidthOct.set(1.0) + pSmoothWidthEntry = ttk.Entry(master=ppsdSettingsFrame, textvariable=self.perSmoothWidthOct, width=10, validate='focusout', validatecommand=on_per_smoothwidth_oct) + pSmoothWidthEntry.grid(row=6, column=1, sticky='w', padx=(5, 10)) + + # special_handling=None, + + def on_special_handling(): + try: + str(self.special_handling.get()) + if self.special_handling.get() == 'None': + specialHandlingLabel.configure(text="special_handling={}".format(self.special_handling.get())) + special_handling = None + else: + specialHandlingLabel.configure(text="special_handling='{}'".format(self.special_handling.get())) + special_handling = self.special_handling.get() + update_ppsd_call(self.ppsd_call) + return True + except ValueError: + return False + + specialHandlingLabel = ttk.Label(master=ppsdParamsFrame, text="special_handling=None") + specialHandlingLabel.grid(row=7, column=0, sticky='ew', pady=(6,6), padx=5) + + ttk.Label(master=ppsdSettingsFrame, text='Special Handling [str]').grid(row=7, column=0, sticky='w', padx=5) + + self.special_handling = tk.StringVar() + self.special_handling.set('None') + ttk.Radiobutton(master=ppsdSettingsFrame, text='None', variable=self.special_handling, value='None', command=on_special_handling).grid(row=7, column=1, sticky='w', padx=(5, 10)) + ttk.Radiobutton(master=ppsdSettingsFrame, text='Ringlaser', variable=self.special_handling, value='ringlaser', command=on_special_handling).grid(row=7, column=2, sticky='w', padx=(5, 10)) + ttk.Radiobutton(master=ppsdSettingsFrame, text='Hydrophone', variable=self.special_handling, value='hydrophone', command=on_special_handling).grid(row=7, column=3, sticky='w', padx=(5, 10)) + + if self.special_handling.get()=='None': + special_handling = None + else: + special_handling = self.special_handling.get() + + separator = ttk.Separator(ppsdSettingsFrame, orient='horizontal') + separator.grid(row=8, columnspan=8, sticky='ew', pady=10, padx=5) + + separator = ttk.Separator(ppsdParamsFrame, orient='horizontal') + separator.grid(row=8, sticky='ew', pady=10, padx=5) + + #remove_outliers + + def show_rem_outliers(): + if self.remove_outliers.get(): + rem_outliers_Label.configure(text ='remove_outliers=True') + else: + rem_outliers_Label.configure(text ='remove_outliers=False') + update_ppsd_call(self.ppsd_call) + + self.remove_outliers = 
tk.BooleanVar() + self.remove_outliers.set(True) + ttk.Label(master=ppsdSettingsFrame, text='Remove outlier curves [bool]: ', justify='left').grid(row=9, column=0, sticky='w', padx=5) + rem_outliers_CheckButton = ttk.Checkbutton(master=ppsdSettingsFrame, text='', variable=self.remove_outliers, command=show_rem_outliers) # create the Entry widget + rem_outliers_CheckButton.grid(row=9, column=1, sticky='ew', padx=(5,10)) + rem_outliers_Label = ttk.Label(master=ppsdParamsFrame, text='remove_outliers=True') + rem_outliers_Label.grid(row=9, column=0, sticky='ew', pady=(6,6), padx=5) + + # outlier_std=1.5, + + def on_outlier_std(): + try: + float(self.outlier_std.get()) + outlier_std_Label.configure(text='outlier_std={}'.format(self.outlier_std.get())) + update_ppsd_call(self.ppsd_call) + return True + except ValueError: + return False + outlier_std_Label = ttk.Label(master=ppsdParamsFrame, text='outlier_std=1.5')#.grid(row=0, column=0) + outlier_std_Label.grid(row=10, column=0, sticky='ew', pady=(6,6), padx=5) + + ttk.Label(master=ppsdSettingsFrame, text='St. Dev. for Outliers [float]').grid(row=10, column=0, sticky='w', padx=5) + self.outlier_std = tk.DoubleVar() + self.outlier_std.set(1.5) + outlier_std_Entry = ttk.Entry(master=ppsdSettingsFrame, textvariable=self.outlier_std, width=10, validate='focusout', validatecommand=on_outlier_std) + outlier_std_Entry.grid(row=10, column=1, sticky='w', padx=(5, 10)) + + + #PPSD Function Call + ppsdCallFrame = ttk.LabelFrame(ppsd_settings_tab, text='sprit_hvsr.generate_ppsds() and obspy PPSD() call')#.pack(fill='both') + + self.ppsd_call = ttk.Label(master=ppsdCallFrame, text='obspy...PPSD({}, {}, {}, {}, {}, {}, \n\t{}, {}, {}, {})' + .format('stats', 'metadata', ppsdLenLabel.cget('text'), overlapLabel.cget('text'), pStepOctLabel.cget('text'), sogLabel.cget('text'), + dbbinsLabel.cget('text'), perLimsLabel.cget('text'), pSmoothWidthLabel.cget('text'), specialHandlingLabel.cget('text'))) + self.ppsd_call.pack(side='bottom', anchor='w', padx=(25,0), pady=(10,10)) + + self.generate_ppsd_call = ttk.Label(master=ppsdCallFrame, text='generate_ppsds({}, remove_outliers={}, outlier_std={},...\n\t{}, {}, {}, {}, {}, \n\t{}, {}, {})' + .format('hvsr_data', self.remove_outliers.get(), self.outlier_std.get(), + ppsdLenLabel.cget('text'), overlapLabel.cget('text'), pStepOctLabel.cget('text'), sogLabel.cget('text'), + dbbinsLabel.cget('text'), perLimsLabel.cget('text'), pSmoothWidthLabel.cget('text'), specialHandlingLabel.cget('text'))) + self.generate_ppsd_call.pack(side='bottom', anchor='w', padx=(25,0), pady=(10,10)) + + + def update_ppsd_call(ppsd_call): + ppsd_call.configure(text='obspy...PPSD({}, {}, {}, {}, {}, {}, \n\t{}, {}, {}, {})'.format('stats', 'metadata', ppsdLenLabel.cget('text'), + overlapLabel.cget('text'), pStepOctLabel.cget('text'), sogLabel.cget('text'), + dbbinsLabel.cget('text'), perLimsLabel.cget('text'), pSmoothWidthLabel.cget('text'), specialHandlingLabel.cget('text'))) + + self.generate_ppsd_call.configure(text='generate_ppsds({}, remove_outliers={}, outlier_std={},...\n\t{}, {}, {}, {}, {}, \n\t{}, {}, {})' + .format('hvsr_data', self.remove_outliers.get(), self.outlier_std.get(), + ppsdLenLabel.cget('text'), overlapLabel.cget('text'), pStepOctLabel.cget('text'), sogLabel.cget('text'), + dbbinsLabel.cget('text'), perLimsLabel.cget('text'), pSmoothWidthLabel.cget('text'), specialHandlingLabel.cget('text'))) + + + #Stats from trace(s) + obspyStatsFrame = ttk.LabelFrame(ppsd_settings_tab, text='Data Trace Stats')#.pack(fill='both') + 
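+ # Every settings Entry above uses the same Tk validation idiom: validate='focusout' with
+ # a Python callable passed directly as validatecommand (tkinter registers the callable
+ # automatically). A stripped-down, self-contained sketch of that pattern, with assumed
+ # demo_* names (not sprit's):
+ import tkinter as tk
+ from tkinter import ttk
+
+ demo_root = tk.Tk()
+ demo_var = tk.DoubleVar(value=1.5)
+ demo_echo = ttk.Label(demo_root, text='outlier_std=1.5')
+
+ def demo_on_change():
+     # A validatecommand must return True/False; returning False on a parse error
+     # keeps validation active and leaves the echoed call text unchanged.
+     try:
+         demo_echo.configure(text='outlier_std={}'.format(float(demo_var.get())))
+         return True
+     except (ValueError, tk.TclError):
+         return False
+
+ demo_entry = ttk.Entry(demo_root, textvariable=demo_var, validate='focusout',
+                        validatecommand=demo_on_change)
+ demo_entry.pack()
+ demo_echo.pack()
+ # demo_root.mainloop()  # uncomment to run the sketch on its own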
self.obspySreamLabel_settings = ttk.Label(obspyStatsFrame, text='Stats') + self.obspySreamLabel_settings.pack(anchor='nw', padx=5) + + #Metadata (PAZ) + obspyMetadataFrame = ttk.LabelFrame(ppsd_settings_tab, text='Metadata Poles and Zeros')#.pack(fill='both') + + self.metadataZ_settings = ttk.Label(obspyMetadataFrame, text='Z: ') + self.metadataZ_settings.grid(row=1, column=0, padx=5) + self.metadataZ_settings.configure(font=("TkDefaultFont", 10, 'underline', 'bold')) + self.sensitivityLabelZ_settings = ttk.Label(obspyMetadataFrame, text='Sensitivity_Z') + self.sensitivityLabelZ_settings.grid(row=1, column=1, padx=5) + self.gainLabelZ_settings = ttk.Label(obspyMetadataFrame, text='Gain_Z') + self.gainLabelZ_settings.grid(row=1, column=2, padx=5) + self.polesLabelZ_settings = ttk.Label(obspyMetadataFrame, text='Poles_Z') + self.polesLabelZ_settings.grid(row=1, column=3, padx=5) + self.zerosLabelZ_settings = ttk.Label(obspyMetadataFrame, text='Zeros_Z') + self.zerosLabelZ_settings.grid(row=1, column=4, padx=5) + + self.metadataN_settings = ttk.Label(obspyMetadataFrame, text='N: ') + self.metadataN_settings.grid(row=2, column=0, padx=5) + self.metadataN_settings.configure(font=("TkDefaultFont", 10, 'underline', 'bold')) + self.sensitivityLabelN_settings = ttk.Label(obspyMetadataFrame, text='Sensitivity_N') + self.sensitivityLabelN_settings.grid(row=2, column=1, padx=5) + self.gainLabelN_settings = ttk.Label(obspyMetadataFrame, text='Gain_N') + self.gainLabelN_settings.grid(row=2, column=2, padx=5) + self.polesLabelN_settings = ttk.Label(obspyMetadataFrame, text='Poles_N') + self.polesLabelN_settings.grid(row=2, column=3, padx=5) + self.zerosLabelN_settings = ttk.Label(obspyMetadataFrame, text='Zeros_N') + self.zerosLabelN_settings.grid(row=2, column=4, padx=5) + + self.metadataE_settings = ttk.Label(obspyMetadataFrame, text='E: ') + self.metadataE_settings.grid(row=3, column=0, padx=5) + self.metadataE_settings.configure(font=("TkDefaultFont", 10, 'underline', 'bold')) + self.sensitivityLabelE_settings = ttk.Label(obspyMetadataFrame, text='Sensitivity_E') + self.sensitivityLabelE_settings.grid(row=3, column=1) + self.gainLabelE_settings = ttk.Label(obspyMetadataFrame, text='Gain_E') + self.gainLabelE_settings.grid(row=3, column=2, padx=5) + self.polesLabelE_settings = ttk.Label(obspyMetadataFrame, text='Poles_E') + self.polesLabelE_settings.grid(row=3, column=3, padx=5) + self.zerosLabelE_settings = ttk.Label(obspyMetadataFrame, text='Zeros_E') + self.zerosLabelE_settings.grid(row=3, column=4, padx=5) + + self.metadata_sensitivity = ttk.Label(obspyMetadataFrame, text='Sensitivity') + self.metadata_sensitivity.grid(row=0, column=1, padx=5) + self.metadata_sensitivity.configure(font=("TkDefaultFont", 10, 'underline', 'bold')) + + self.metadata_gain = ttk.Label(obspyMetadataFrame, text='Gain') + self.metadata_gain.grid(row=0, column=2, padx=5) + self.metadata_gain.configure(font=("TkDefaultFont", 10, 'underline', 'bold')) + + self.metadata_poles = ttk.Label(obspyMetadataFrame, text='Poles') + self.metadata_poles.grid(row=0, column=3, padx=5) + self.metadata_poles.configure(font=("TkDefaultFont", 10, 'underline', 'bold')) + + self.metadata_zeros = ttk.Label(obspyMetadataFrame, text='Zeros') + self.metadata_zeros.grid(row=0, column=4, padx=5) + self.metadata_zeros.configure(font=("TkDefaultFont", 10, 'underline', 'bold')) + + #Run button frame + runFrame_set_ppsd = ttk.Frame(ppsd_settings_tab) + self.run_button = ttk.Button(runFrame_set_ppsd, text="Run", style='Run.TButton', command=process_data) 
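+ # 'Run.TButton' used below is a derived ttk style, configured earlier via
+ # self.style.configure('Run.TButton', background='#8b9685', ...). A self-contained
+ # sketch of that idiom (assumed demo_* names; note that some ttk themes ignore the
+ # background option):
+ import tkinter as tk
+ from tkinter import ttk
+
+ demo_root = tk.Tk()
+ demo_style = ttk.Style(demo_root)
+ # Deriving 'Demo.TButton' from the built-in TButton keeps default behavior and
+ # bindings; only the options listed here are overridden.
+ demo_style.configure('Demo.TButton', background='#8b9685', font=('TkDefaultFont', 10, 'bold'))
+ ttk.Button(demo_root, text='Run', style='Demo.TButton').pack(padx=10, pady=10)
+ # demo_root.mainloop()  # uncomment to run the sketch on its own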
+ self.run_button.grid(row=0, column=11, sticky='ew', padx=2.5) + + self.settingsProgBar_ppsd = ttk.Progressbar(runFrame_set_ppsd, orient='horizontal') + self.settingsProgBar_ppsd.grid(row=0, column=0, columnspan=10, sticky='ew') + runFrame_set_ppsd.columnconfigure(0, weight=1) + + runFrame_set_ppsd.pack(fill='both', side='bottom', anchor='e') + obspyMetadataFrame.pack(fill='both', side='bottom',expand=True)#.grid(row=7, column=0, columnspan=6, sticky='nsew')#.pack(side='bottom', fill='both', anchor='n', expand=True) + obspyStatsFrame.pack(fill='both', side='bottom',expand=True)#.grid(row=6, column=0, columnspan=6, sticky='nsew')#.pack(side='bottom', fill='both', anchor='n', expand=True) + ppsdCallFrame.pack(fill='both', side='bottom',expand=True)#row=5, column=0, columnspan=6, sticky='nsew')#.pack(side='bottom', fill='both', anchor='n', expand=True) + ppsdParamsFrame.pack(fill='both', side='right')#.grid(row=0, column=5, rowspan=4, sticky='nsew')#.pack(side='right',fill='both', anchor='n', expand=True) + ppsdSettingsFrame.pack(fill='both', expand=True, side='top', anchor='w')#.grid(row=0, column=0, columnspan=4, rowspan=4, sticky='nsew')#.pack(side='left', fill='both', anchor='n', expand=True) + + ppsd_settings_tab.pack(fill='both', expand=True) + settings_notebook.add(ppsd_settings_tab, text="PPSD") + + #HVSR SETTINGS SUBTAB + hvsr_settings_tab = ttk.Frame(settings_notebook) + + hvsrSettingsFrame = ttk.LabelFrame(hvsr_settings_tab, text='H/V Processing Settings')#.pack(fill='both') + + hvsrParamsFrame = ttk.LabelFrame(hvsr_settings_tab, text='Process HVSR Parameters')#.pack(fill='both') + + #Method selection, default method=3 + ttk.Label(hvsrSettingsFrame, text="Horizontal Combine Method [int]").grid(row=0, column=0, padx=(5,0), sticky='w') + method_options = ['', #Empty to make intuitive and match sprit_hvsr.py + "1. Diffuse Field Assumption (not currently implemented)", + "2. Arithmetic Mean: H ≡ (N + E)/2", + "3. Geometric Mean: H ≡ √(N · E) (recommended by the SESAME Project (2004))", + "4. Vector Summation: H ≡ √(N^2 + E^2)", + "5. Quadratic Mean: H ≡ √((N^2 + E^2)/2)", + "6. 
Maximum Horizontal Value: H ≡ max(N, E)" + ] + + + def on_method_select(meth, meth_opts=method_options): + self.method_ind = meth_opts.index(meth) + + try: + int(self.method_ind) + hCombMethodLabel.configure(text="method={}".format(self.method_ind)) + update_procHVSR_call(self.procHVSR_call) + return True + except ValueError: + return False + + defaultMeth=3 + hCombMethodLabel = ttk.Label(master=hvsrParamsFrame, text="method={}".format(defaultMeth), width=30) + hCombMethodLabel.grid(row=0, column=0, sticky='ew', pady=(6,6), padx=5) + + self.method_sel = tk.StringVar(value=method_options[defaultMeth]) + self.method_ind = method_options.index(self.method_sel.get()) + self.method_dropdown = ttk.OptionMenu(hvsrSettingsFrame, self.method_sel, method_options[defaultMeth], *method_options, command=on_method_select) + self.method_dropdown.config(width=50) + self.method_dropdown.grid(row=0, column=1, columnspan=8, sticky='ew') + + #smooth=True, + + def curve_smooth(): + try: + int(self.hvsmooth.get()) + bool(self.hvsmoothbool.get()) + if not self.hvsmoothbool.get(): + hvSmoothLabel.configure(text='smooth={}'.format(self.hvsmoothbool.get())) + self.hvsmooth_param = False + else: + hvSmoothLabel.configure(text='smooth={}'.format(self.hvsmooth.get())) + self.hvsmooth_param = self.hvsmooth.get() + update_procHVSR_call(self.procHVSR_call) + return True + except ValueError: + return False + + hvSmoothLabel = ttk.Label(master=hvsrParamsFrame, text="smooth=True", width=30) + hvSmoothLabel.grid(row=1, column=0, sticky='ew', pady=(6,6), padx=5) + + ttk.Label(master=hvsrSettingsFrame, text='Smooth H/V Curve [bool]').grid(row=1, column=0, padx=(5,0), sticky='w') + + self.hvsmoothbool = tk.BooleanVar() + self.hvsmoothbool.set(True) + self.hvsmooth_param=True + smoothCurveBool = ttk.Checkbutton(master=hvsrSettingsFrame, text="", compound='left', variable=self.hvsmoothbool, command=curve_smooth) # create the Checkbutton widget + smoothCurveBool.grid(row=1, column=1, sticky='w') + + self.hvsmooth = tk.IntVar() + self.hvsmooth.set(51) + smoothCurveSamples = ttk.Entry(master=hvsrSettingsFrame, textvariable=self.hvsmooth, width=10, validate='focusout', validatecommand=curve_smooth) + smoothCurveSamples.grid(row=1, column=2, sticky='w', padx=(5, 10)) + ttk.Label(master=hvsrSettingsFrame, text='[int] # pts in smoothing window (default=51)').grid(row=1, column=3, padx=(0,0)) + + #freq_smooth='konno ohmachi', + freqSmoothLabel = ttk.Label(master=hvsrParamsFrame, text="freq_smooth='konno ohmachi'", width=30) + freqSmoothLabel.grid(row=2, column=0, sticky='w', pady=(16,16), padx=5) + + + def on_freq_smooth(): + try: + str(self.freq_smooth.get()) + freqSmoothLabel.configure(text="freq_smooth={}".format(self.freq_smooth.get())) + update_procHVSR_call(self.procHVSR_call) + return True + except ValueError: + return False + + self.freq_smooth = tk.StringVar() + self.freq_smooth.set('konno ohmachi') + ttk.Label(master=hvsrSettingsFrame, text='Frequency Smoothing [str]').grid(row=2, column=0, padx=(5,0), sticky='w') + fsmoothOptFrame = ttk.LabelFrame(master=hvsrSettingsFrame, text='Frequency Smoothing Operations') + fsmoothOptFrame.grid(row=2, column=1, columnspan=7, padx=5, sticky='nsew') + ttk.Radiobutton(master=fsmoothOptFrame, text='Konno-Ohmachi', variable=self.freq_smooth, value='konno ohmachi', command=on_freq_smooth).grid(row=0, column=0, sticky='w', padx=(5, 10)) + ttk.Radiobutton(master=fsmoothOptFrame, text='Constant', variable=self.freq_smooth, value='constant', command=on_freq_smooth).grid(row=0, column=1, 
sticky='w', padx=(5, 10)) + ttk.Radiobutton(master=fsmoothOptFrame, text='Proportional', variable=self.freq_smooth, value='proportional', command=on_freq_smooth).grid(row=0, column=2, sticky='w', padx=(5, 10)) + ttk.Radiobutton(master=fsmoothOptFrame, text='None', variable=self.freq_smooth, value='None', command=on_freq_smooth).grid(row=0, column=3, sticky='w', padx=(5, 10)) + + #f_smooth_width=40, + fSmoothWidthlabel = ttk.Label(master=hvsrParamsFrame, text="f_smooth_width=40", width=30) + fSmoothWidthlabel.grid(row=3, column=0, sticky='ew', pady=(6,6), padx=5) + + + def on_smooth_width(): + try: + int(self.fSmoothWidth.get()) + fSmoothWidthlabel.configure(text='f_smooth_width={}'.format(self.fSmoothWidth.get())) + update_procHVSR_call(self.procHVSR_call) + return True + except ValueError: + return False + + ttk.Label(master=hvsrSettingsFrame, text='Bandwidth of freq. smoothing [int]').grid(row=3, column=0, padx=(5,0), sticky='w') + self.fSmoothWidth = tk.IntVar() + self.fSmoothWidth.set(40) + fSmoothWidthEntry = ttk.Entry(master=hvsrSettingsFrame, justify='left', textvariable=self.fSmoothWidth, validate='focusout', validatecommand=on_smooth_width, width=10) + fSmoothWidthEntry.grid(row=3, column=1, sticky='w', padx=(5, 10)) + + #resample=True, + resampleLabel = ttk.Label(master=hvsrParamsFrame, text="resample=True", width=30) + resampleLabel.grid(row=4, column=0, sticky='ew', pady=(6,6), padx=5) + + def on_curve_resample(): + try: + if not self.resamplebool.get(): + resampleLabel.configure(text='resample={}'.format(self.resamplebool.get())) + self.hvresample_int=self.hvresample.get() + else: + resampleLabel.configure(text='resample={}'.format(self.hvresample.get())) + self.hvresample_int=self.hvresample.get() + update_procHVSR_call(self.procHVSR_call) + return True + except ValueError: + return False + + self.resamplebool = tk.BooleanVar() + self.resamplebool.set(True) + ttk.Label(master=hvsrSettingsFrame, text='Resample H/V Curve [bool]').grid(row=4, column=0, padx=(5,0), sticky='w') + resampleCurveBool = ttk.Checkbutton(master=hvsrSettingsFrame, text="", compound='left', variable=self.resamplebool, command=on_curve_resample) # create the Checkbutton widget + resampleCurveBool.grid(row=4, column=1, sticky='w') + + self.hvresample = tk.IntVar() + self.hvresample.set(1000) + self.hvresample_int = self.hvresample.get() + resampleCurveSamples = ttk.Entry(master=hvsrSettingsFrame, textvariable=self.hvresample, width=10, validate='focusout', validatecommand=on_curve_resample) + resampleCurveSamples.grid(row=4, column=2, sticky='w', padx=(5, 10)) + ttk.Label(master=hvsrSettingsFrame, text='[int] # pts in resampled curve (default=1000)').grid(row=4, column=3, padx=(0,0), sticky='w') + + #outlier_curve_std=1.75 + outlierValLabel = ttk.Label(master=hvsrParamsFrame, text="outlier_curve_std=1.75", width=30) + outlierValLabel.grid(row=5, column=0, sticky='ew', pady=(6,6), padx=5) + + ttk.Label(master=hvsrSettingsFrame, text='Outlier St. Dev. 
[float]').grid(row=6, column=0, columnspan=2, padx=(5,0), sticky='w') + self.outlierRemStDev = tk.DoubleVar() + self.outlierRemStDev.set(1.75) + outlierRemStDev = ttk.Entry(master=hvsrSettingsFrame, textvariable=self.outlierRemStDev, width=10, validate='focusout', validatecommand=on_outlier_std) + outlierRemStDev.grid(row=6, column=1, sticky='w', padx=(5, 10)) + + separator = ttk.Separator(hvsrSettingsFrame, orient='horizontal') + separator.grid(row=7, columnspan=7, sticky='ew', pady=10) + + #hvsr_band=[0.4, 40] + hvsrBandLabel = ttk.Label(master=hvsrParamsFrame, text="hvsr_band=[0.4,40]", width=30) + hvsrBandLabel.grid(row=7, column=0, sticky='w', pady=(20,6), padx=5) + + ttk.Label(hvsrSettingsFrame,text="HVSR Band [Hz]").grid(row=8,column=0, sticky='w', padx=(5,0)) + + hvsr_band_min_settingsEntry = ttk.Entry(hvsrSettingsFrame, width=10, textvariable=self.hvsrBand_min, validate='focusout', validatecommand=on_hvsrband_update) + hvsr_band_min_settingsEntry.grid(row=8,column=1, sticky='ew') + + hvsr_band_max_settingsEntry = ttk.Entry(hvsrSettingsFrame, width=10, textvariable=self.hvsrBand_max, validate='focusout', validatecommand=on_hvsrband_update) + hvsr_band_max_settingsEntry.grid(row=8,column=2, sticky='ew') + + #peak_freq_range=[0.4, 40] + peakFreqRangeLabel = ttk.Label(master=hvsrParamsFrame, text="peak_freq_range=[0.4,40]", width=30) + peakFreqRangeLabel.grid(row=8, column=0, sticky='w', pady=(20,6), padx=5) + + ttk.Label(hvsrSettingsFrame,text="Peak Frequency Range [Hz]").grid(row=9,column=0, sticky='w', padx=(5,0)) + + hvsr_band_min_settingsEntry = ttk.Entry(hvsrSettingsFrame, width=10, textvariable=self.peakFreqRange_min, validate='focusout', validatecommand=on_peakFreqRange_update) + hvsr_band_min_settingsEntry.grid(row=9,column=1, sticky='ew') + + hvsr_band_max_settingsEntry = ttk.Entry(hvsrSettingsFrame, width=10, textvariable=self.peakFreqRange_max, validate='focusout', validatecommand=on_peakFreqRange_update) + hvsr_band_max_settingsEntry.grid(row=9,column=2, sticky='ew') + + #Process HVSR Function Call + hvsrCallFrame = ttk.LabelFrame(hvsr_settings_tab, text='sprit_hvsr.process_hvsr() Call')#.pack(fill='both') + + self.procHVSR_call = ttk.Label(master=hvsrCallFrame, text='process_hvsr({}, {}, {}, {}, {}, \n\t{}, {}, {})' + .format('hvsr_data', hCombMethodLabel.cget('text'), hvSmoothLabel.cget('text'), freqSmoothLabel.cget('text'), fSmoothWidthlabel.cget('text'), resampleLabel.cget('text'), + outlierValLabel.cget('text'), hvsrBandLabel.cget('text'))) + self.procHVSR_call.pack(anchor='w', padx=(25,0), pady=(10,10)) + + + def update_procHVSR_call(procHVSR_call): + procHVSR_call.configure(text='process_hvsr({}, {}, {}, {}, {}, \n\t{}, {}, {})' + .format('hvsr_data', hCombMethodLabel.cget('text'), hvSmoothLabel.cget('text'), freqSmoothLabel.cget('text'), fSmoothWidthlabel.cget('text'), resampleLabel.cget('text'), + outlierValLabel.cget('text'), hvsrBandLabel.cget('text'))) + + #Check Peaks Function Call + checkPeaksCallFrame = ttk.LabelFrame(hvsr_settings_tab, text='sprit_hvsr.check_peaks() Call')#.pack(fill='both') + + self.checkPeaks_Call = ttk.Label(master=checkPeaksCallFrame, text='check_peaks({}, {}, {})' + .format('hvsr_data', hvsrBandLabel.cget('text'), peakFreqRangeLabel.cget('text'))) + self.checkPeaks_Call.pack(anchor='w', padx=(25,0), pady=(10,10)) + + #check_peaks(hvsr_dict, hvsr_band=[0.4, 40], peak_water_level=1.8) + + def update_check_peaks_call(checkPeaks_Call): + checkPeaks_Call.configure(text='check_peaks({}, {}, {})' + .format('hvsr_data', 
hvsrBandLabel.cget('text'), peakFreqRangeLabel.cget('text'))) + + + #Run button frame + runFrame_set_hvsr = ttk.Frame(hvsr_settings_tab) + runFrame_set_hvsr.columnconfigure(0, weight=1) + + self.settingsProgBar_hvsr = ttk.Progressbar(runFrame_set_hvsr, orient='horizontal') + self.settingsProgBar_hvsr.grid(row=0, column=0, sticky='nsew')#.pack(fill='both',expand=True, side='left', anchor='sw') + + self.run_button = ttk.Button(runFrame_set_hvsr, text="Run", style='Run.TButton', command=process_data) + self.run_button.grid(row=0, column=1, sticky='nsew', padx=2.5)#.pack(side='bottom', anchor='e') + + #Pack tab + runFrame_set_hvsr.pack(fill='both', side='bottom', anchor='e') + checkPeaksCallFrame.pack(fill='both', expand=True, side='bottom')#.grid(row=10, column=0, columnspan=6, sticky='nsew')#.pack(side='bottom', fill='both', anchor='n', expand=True) + hvsrCallFrame.pack(fill='both', expand=True, side='bottom')#.grid(row=9, column=0, columnspan=6, sticky='nsew')#.pack(side='bottom', fill='both', anchor='n', expand=True) + hvsrParamsFrame.pack(fill='both', side='right')#.grid(row=0, column=6, rowspan=4, sticky='nsew')#.pack(side='right',fill='both', anchor='n', expand=True) + hvsrSettingsFrame.pack(fill='both', expand=True, side='top')#.grid(row=0, column=0, columnspan=6, rowspan=4, sticky='nsew')#.pack(fill='both', expand=True) + + hvsr_settings_tab.pack(fill='both', expand=True) + settings_notebook.add(hvsr_settings_tab, text="HVSR Settings") + + #PLOT SETTINGS TAB + plot_settings_tab = ttk.Frame(settings_notebook) + + # Create the Plot Options LabelFrame + plot_options_frame = ttk.LabelFrame(plot_settings_tab, text="Plot Options") + + + def update_hvplot_call(): + kindstr = get_kindstr() + hvplot_label.configure(text="plot_hvsr({}, plot_type={}, xtype='{}', show_legend={}, {}, {})".format('hvsr_data', kindstr, self.x_type.get(), self.show_legend.get(), '[...]', 'kwargs')) + + # Create the Checkbuttons for the plot options + ttk.Label(plot_options_frame, text='HVSR Plot', justify='center').grid(row=0, column=1, sticky='ew', padx=(5, 5)) + ttk.Label(plot_options_frame, text='Components H/V Plot', justify='center').grid(row=0, column=2, sticky='ew', padx=(5, 5)) + ttk.Label(plot_options_frame, text='Spectrogram Plot', justify='center').grid(row=0, column=3, sticky='ew', padx=(5, 5)) + + self.hvsr_chart_bool = tk.BooleanVar() + self.hvsr_chart_bool.set(True) + ttk.Checkbutton(plot_options_frame, text='', variable=self.hvsr_chart_bool, command=update_hvplot_call).grid(row=1, column=1, sticky='nsew', padx=15, pady=(5, 20)) + self.ind_comp_chart_bool = tk.BooleanVar() + self.ind_comp_chart_bool.set(True) + ttk.Checkbutton(plot_options_frame, text='', variable=self.ind_comp_chart_bool, command=update_hvplot_call).grid(row=1, column=2, sticky='nsew', padx=50, pady=(5, 20)) + self.spec_chart_bool = tk.BooleanVar() + self.spec_chart_bool.set(True) + ttk.Checkbutton(plot_options_frame, text='', variable=self.spec_chart_bool, command=update_hvplot_call).grid(row=1, column=3, sticky='nsew', padx=25, pady=(5, 20)) + + ttk.Separator(plot_options_frame, orient='horizontal').grid(row=2, columnspan=5, sticky='ew', pady=5) + + #Separate component chart: c+ + ttk.Label(plot_options_frame, text='Show Components on same chart as H/V Curve:').grid(row=3, column=0, sticky='w', padx=5) + + + def disable_comp_buttons(): + if self.show_comp_with_hv.get(): + self.annotate_best_peak_comp.set(False) + self.show_best_peak_comp.set(False) + bestPeakCompButton.config(state="disabled") + 
bestPeakCompAnnButton.config(state='disabled') + else: + bestPeakCompButton.config(state="normal") + bestPeakCompAnnButton.config(state='normal') + update_hvplot_call() + + self.show_comp_with_hv = tk.BooleanVar() + self.show_comp_with_hv.set(False) + ttk.Checkbutton(plot_options_frame, text="", variable=self.show_comp_with_hv, + command=disable_comp_buttons).grid(row=3, column=2, sticky="ew", padx=50) + + ttk.Separator(plot_options_frame, orient='horizontal').grid(row=4, columnspan=5, sticky='ew', pady=5) + + #Show Best Peak: p + ttk.Label(plot_options_frame, text='Show Best Peak:').grid(row=5, column=0, sticky='w', padx=5) + + self.show_best_peak_hv = tk.BooleanVar() + self.show_best_peak_hv.set(True) + ttk.Checkbutton(plot_options_frame, text="", variable=self.show_best_peak_hv, command=update_hvplot_call).grid(row=5, column=1, sticky="ew", padx=15) + + self.show_best_peak_comp = tk.BooleanVar() + self.show_best_peak_comp.set(True) + bestPeakCompButton=ttk.Checkbutton(plot_options_frame, text="", variable=self.show_best_peak_comp, command=update_hvplot_call) + bestPeakCompButton.grid(row=5, column=2, sticky="ew", padx=50) + + self.show_best_peak_spec = tk.BooleanVar() + self.show_best_peak_spec.set(False) + ttk.Checkbutton(plot_options_frame, text="", variable=self.show_best_peak_spec, command=update_hvplot_call).grid(row=5, column=3, sticky="ew", padx=25) + + ttk.Separator(plot_options_frame, orient='horizontal').grid(row=6, columnspan=5, sticky='ew') + + #Annotate Best Peak: ann + ttk.Label(plot_options_frame, text='Annotate Best Peak:').grid(row=7, column=0, sticky='w', padx=5) + + self.annotate_best_peak_hv = tk.BooleanVar() + self.annotate_best_peak_hv.set(True) + ttk.Checkbutton(plot_options_frame, text="", variable=self.annotate_best_peak_hv, command=update_hvplot_call).grid(row=7, column=1, sticky="ew", padx=15) + + self.annotate_best_peak_comp = tk.BooleanVar() + self.annotate_best_peak_comp.set(True) + bestPeakCompAnnButton=ttk.Checkbutton(plot_options_frame, text="", variable=self.annotate_best_peak_comp, command=update_hvplot_call) + bestPeakCompAnnButton.grid(row=7, column=2, sticky="ew", padx=50) + + self.annotate_best_peak_spec = tk.BooleanVar() + self.annotate_best_peak_spec.set(True) + ttk.Checkbutton(plot_options_frame, text="", variable=self.annotate_best_peak_spec, command=update_hvplot_call).grid(row=7, column=3, sticky="ew", padx=25) + + ttk.Separator(plot_options_frame, orient='horizontal').grid(row=8, columnspan=5, sticky='ew') + + + #Show all peaks (main H/V curve): all + ttk.Label(plot_options_frame, text='Show All Peaks (H/V Curve):').grid(row=9, column=0, sticky='w', padx=5) + + self.show_all_peaks_hv = tk.BooleanVar() + self.show_all_peaks_hv.set(False) + ttk.Checkbutton(plot_options_frame, text="", variable=self.show_all_peaks_hv, command=update_hvplot_call).grid(row=9, column=1, sticky="ew", padx=15) + + ttk.Separator(plot_options_frame, orient='horizontal').grid(row=10, columnspan=5, sticky='ew') + + #Show all curves: t + ttk.Label(plot_options_frame, text='Show All H/V Curves:').grid(row=11, column=0, sticky='w', padx=5) + + self.show_ind_curves = tk.BooleanVar() + self.show_ind_curves.set(False) + ttk.Checkbutton(plot_options_frame, text="", variable=self.show_ind_curves, command=update_hvplot_call).grid(row=11, column=1, sticky="ew", padx=15) + + ttk.Separator(plot_options_frame, orient='horizontal').grid(row=12, columnspan=5, sticky='ew') + + #Show individual peaks (tp): tp + ttk.Label(plot_options_frame, text='Show Individual Peaks:').grid(row=13, 
column=0, sticky='w', padx=5) + + self.show_ind_peaks = tk.BooleanVar() + self.show_ind_peaks.set(False) + ttk.Checkbutton(plot_options_frame, text="", variable=self.show_ind_peaks, command=update_hvplot_call).grid(row=13, column=1, sticky="ew", padx=15) + + ttk.Separator(plot_options_frame, orient='horizontal').grid(row=14, columnspan=5, sticky='ew') + + #Show individual peaks (tp): tp + ttk.Label(plot_options_frame, text='Show Standard Deviation:').grid(row=15, column=0, sticky='w', padx=5) + + self.show_stDev_hv = tk.BooleanVar() + self.show_stDev_hv.set(True) + ttk.Checkbutton(plot_options_frame, text="", variable=self.show_stDev_hv, command=update_hvplot_call).grid(row=15, column=1, sticky="ew", padx=15) + + self.show_stDev_comp = tk.BooleanVar() + self.show_stDev_comp.set(True) + ttk.Checkbutton(plot_options_frame, text="", variable=self.show_stDev_comp, command=update_hvplot_call).grid(row=15, column=2, sticky="ew", padx=50) + + ttk.Separator(plot_options_frame, orient='horizontal').grid(row=16, columnspan=5, sticky='ew') + + ttk.Label(plot_options_frame, text='Show Legend:').grid(row=17, column=0, sticky='w', padx=5) + + self.show_legend = tk.BooleanVar() + self.show_legend.set(False) + ttk.Checkbutton(plot_options_frame, text="", variable=self.show_legend, command=update_hvplot_call).grid(row=17, column=2, sticky="ew", padx=50) + + + #Specify X-Type + ttk.Label(plot_options_frame, text='X Type:').grid(row=18, column=0, sticky='w', padx=5, pady=10) + + self.x_type = tk.StringVar() + self.x_type.set('freq') + ttk.Radiobutton(master=plot_options_frame, text='Frequency', variable=self.x_type, value='freq', command=update_hvplot_call).grid(row=18, column=1, sticky='w', padx=(5, 10), pady=10) + ttk.Radiobutton(master=plot_options_frame, text='Period', variable=self.x_type, value='period', command=update_hvplot_call).grid(row=18, column=2, sticky='w', padx=(5, 10), pady=10) + + #kwargs + ttk.Label(plot_options_frame, text='Matplotlib Keyword Arguments (not implemented):').grid(row=19, column=0, sticky='w', padx=5, pady=10) + + self.plot_kwargs = tk.StringVar() + self.plot_kwargs.set("cmap='turbo'") + ttk.Entry(plot_options_frame, textvariable=self.plot_kwargs).grid(row=19, column=1, columnspan=3, sticky="ew", pady=10) + + plot_options_frame.pack(fill='both', expand=True)#.grid(row=1, column=0, padx=10, pady=10, sticky="nsew") + + # Create the plot_hvsr Call LabelFrame + hvplot_call_frame = ttk.LabelFrame(plot_settings_tab, text="plot_hvsr() Call") + + #HVSR + + def get_kindstr(): + if self.hvsr_chart_bool.get(): + kindstr_hv = 'HVSR' + if self.show_best_peak_hv.get(): + kindstr_hv = kindstr_hv + ' p' + if self.annotate_best_peak_hv.get(): + kindstr_hv = kindstr_hv + ' ann' + if self.show_all_peaks_hv.get(): + kindstr_hv = kindstr_hv + ' all' + if self.show_ind_curves.get(): + kindstr_hv = kindstr_hv + ' t' + if self.show_ind_peaks.get(): + kindstr_hv = kindstr_hv.replace('t', 'tp') + if 'tp' not in kindstr_hv: + kindstr_hv = kindstr_hv+ ' tp' + if not self.show_stDev_hv.get(): + kindstr_hv = kindstr_hv + ' -s' + else: + kindstr_hv = '' + + #Comp + if self.ind_comp_chart_bool.get(): + kindstr_c = 'c' + + if not self.show_comp_with_hv.get(): + kindstr_c = kindstr_c + '+' + + if self.show_best_peak_comp.get(): + kindstr_c = kindstr_c + ' p' + if self.annotate_best_peak_comp.get(): + kindstr_c = kindstr_c + ' ann' + if not self.show_stDev_comp.get(): + kindstr_c = kindstr_c + ' -s' + else: + kindstr_c = '' + + #Specgram + if self.spec_chart_bool.get(): + kindstr_spec = 'Spec' + + if 
self.show_best_peak_spec.get(): + kindstr_spec = kindstr_spec + ' p' + if self.annotate_best_peak_spec.get(): + kindstr_spec = kindstr_spec + ' ann' + else: + kindstr_spec = '' + kindstr = kindstr_hv + ' ' + kindstr_c + ' ' + kindstr_spec + return kindstr + + + # Add a Label widget to the plot_hvsr Call Label section + hvplot_label = ttk.Label(hvplot_call_frame, text="plot_hvsr({}, plot_type='{}', xtype='{}', show_legend={}, {}, {})".format('hvsr_data', get_kindstr(), self.x_type.get(), self.show_legend.get(), '[...]', 'kwargs')) + + #Run button frame + runFrame_set_plot = ttk.Frame(plot_settings_tab) + runFrame_set_plot.columnconfigure(0, weight=1) + + self.settingsProgBar_plot = ttk.Progressbar(runFrame_set_plot, orient='horizontal') + self.settingsProgBar_plot.grid(row=0, column=0, columnspan=10, sticky='ew')#.pack(fill='both',expand=True, side='left', anchor='sw') + self.run_button = ttk.Button(runFrame_set_plot, text="Run", style='Run.TButton', command=process_data) + + + def update_results_plot(): + self.tab_control.select(self.results_tab) + sprit_hvsr.plot_hvsr(self.hvsr_results, plot_type=get_kindstr(), fig=self.fig_results, ax=self.ax_results, show_legend=self.show_legend.get(), use_subplots=True, clear_fig=False) + + self.update_results_plot_button = ttk.Button(runFrame_set_plot, text="Update Plot", style='Noise.TButton', command=update_results_plot, width=30) + + self.update_results_plot_button.grid(row=0, column=11, padx=2.5)#pack(side='right', anchor='se') + self.run_button.grid(row=0, column=12, padx=2.5)#(side='right', anchor='se', padx=(10,0)) + + runFrame_set_plot.pack(fill='both', side='bottom', anchor='e') + hvplot_label.pack(fill='both', expand=True, padx=(10,0))#.grid(column=0, row=0, padx=10, pady=10, sticky="w") + hvplot_call_frame.pack(fill='both', expand=True)#.grid(row=2, column=0, padx=10, pady=10, sticky="nsew") + + plot_settings_tab.pack(fill='both', expand=True) + settings_notebook.add(plot_settings_tab, text="Plot Settings") + + # Pack the settings Notebook widget + settings_notebook.pack(expand=True, fill='both') + self.tab_control.add(self.settings_tab, text="Settings") + + # RESULTS TAB + self.results_tab = ttk.Frame(self.tab_control) + self.hvsr_results = {'site':''}#Just initialize for now + + # Create the Batch Site selection LabelFrame + self.results_siteSelectFrame = ttk.LabelFrame(self.results_tab, text="HVSR Results") + self.results_siteSelectLabel = ttk.Label(self.results_siteSelectFrame, text='Select Site ') + + def on_site_select(): + sitename = self.selectedSite.get() + try: + report_results(self.hvsr_results[sitename]) + except Exception: + if sitename == '': + pass + else: + messagebox.showwarning(title='WARNING', message=f"Site {sitename} does not exist") + + if isinstance(self.hvsr_results, sprit_hvsr.HVSRBatch): + sites = self.hvsr_results.sites + else: + sites = [self.hvsr_results['site']] + self.site_options = sites + self.selectedSite = tk.StringVar(value=sites[0]) + self.site_dropdown = ttk.Combobox(self.results_siteSelectFrame, textvariable=self.selectedSite, values=self.site_options, validate='focusout', validatecommand=on_site_select) + self.site_dropdown.config(width=30) + self.results_siteSelectLabel.grid(row=0, column=0, columnspan=1, sticky='ew') + self.site_dropdown.grid(row=0, column=1, columnspan=4, sticky='ew') + + self.browse_results_fig = ttk.Button(self.results_siteSelectFrame, text="Update site",command=on_site_select) + self.browse_results_fig.grid(row=0, column=8, sticky='ew', padx=5) + + 
self.results_siteSelectFrame.columnconfigure(9, weight=1) + + + def update_site_dropdown(): + self.site_dropdown['values'] = self.site_options + + + #lambda value=string: self.om_variable.set(value) + # Create the plot_hvsr Call LabelFrame + self.results_chartFrame = ttk.LabelFrame(self.results_tab, text="Data Plots") + + #Set up plot + self.fig_results = plt.figure() + results_mosaic = [['hvsr'],['comp'],['spec']] + self.ax_results = self.fig_results.subplot_mosaic(results_mosaic) + + self.results_canvas = FigureCanvasTkAgg(self.fig_results, master=self.results_chartFrame) # A tk.DrawingArea. + self.results_canvas.draw() + self.results_canvasWidget = self.results_canvas.get_tk_widget() + self.results_toolbar = NavigationToolbar2Tk(self.results_canvas, self.results_chartFrame, pack_toolbar=False) + self.results_toolbar.update() + self.results_toolbar.pack(fill=tk.X, side=tk.BOTTOM, expand=False) + self.results_canvasWidget.pack(fill='both', expand=True) + + #Peak report + self.results_peakInfoFrame = ttk.LabelFrame(self.results_tab, text="Peak Report") + self.curveTitleLabel = ttk.Label(self.results_peakInfoFrame, text='Criteria for Reliable H/V Curve (all 3 must pass)') + self.curveTest1Label = ttk.Label(self.results_peakInfoFrame, text='Window Length for Frequency') + self.curveTest1ResultFrame = ttk.Frame(self.results_peakInfoFrame) + self.curveTest1ResultText = ttk.Label(self.curveTest1ResultFrame, text='') + self.curveTest1Result = ttk.Label(self.curveTest1ResultFrame, text='') + + self.curveTest2Label = ttk.Label(self.results_peakInfoFrame, text='Number of Significant Cycles') + self.curveTest2ResultFrame = ttk.Frame(self.results_peakInfoFrame) + self.curveTest2ResultText = ttk.Label(self.curveTest2ResultFrame, text='') + self.curveTest2Result = ttk.Label(self.curveTest2ResultFrame, text='') + + self.curveTest3Label = ttk.Label(self.results_peakInfoFrame, text='Low Curve Standard Deviation for Frequencies Near Peak Over Time') + self.curveTest3ResultFrame = ttk.Frame(self.results_peakInfoFrame) + self.curveTest3ResultText = ttk.Label(self.curveTest3ResultFrame, text='') + self.curveTest3Result = ttk.Label(self.curveTest3ResultFrame, text='') + + self.totalCurveResult = ttk.Label(self.results_peakInfoFrame, text='') + + self.peakTitleLabel = ttk.Label(self.results_peakInfoFrame, text='Criteria for a Clear H/V Peak (5/6 must pass)') + self.peakTest1Label = ttk.Label(self.results_peakInfoFrame, text='H/V Amplitude is low Below Peak Frequency') + self.peakTest1ResultFrame = ttk.Frame(self.results_peakInfoFrame) + self.peakTest1ResultText = ttk.Label(self.peakTest1ResultFrame, text='') + self.peakTest1Result = ttk.Label(self.peakTest1ResultFrame, text='') + + self.peakTest2Label = ttk.Label(self.results_peakInfoFrame, text='H/V Amplitude is low Above Peak Frequency') + self.peakTest2ResultFrame = ttk.Frame(self.results_peakInfoFrame) + self.peakTest2ResultText = ttk.Label(self.peakTest2ResultFrame, text='') + self.peakTest2Result = ttk.Label(self.peakTest2ResultFrame, text='') + + self.peakTest3Label = ttk.Label(self.results_peakInfoFrame, text='Peak is Prominent') + self.peakTest3ResultFrame = ttk.Frame(self.results_peakInfoFrame) + self.peakTest3ResultText = ttk.Label(self.peakTest3ResultFrame, text='') + self.peakTest3Result = ttk.Label(self.peakTest3ResultFrame, text='') + + self.peakTest4Label = ttk.Label(self.results_peakInfoFrame, text='Frequency of Peak is Stationary Over Time') + self.peakTest4ResultFrame = ttk.Frame(self.results_peakInfoFrame) + self.peakTest4ResultText = 
ttk.Label(self.peakTest4ResultFrame, text='') + self.peakTest4Result = ttk.Label(self.peakTest4ResultFrame, text='') + + self.peakTest5Label = ttk.Label(self.results_peakInfoFrame, text='Standard Deviation of Peak Frequency is low ') + self.peakTest5ResultFrame = ttk.Frame(self.results_peakInfoFrame) + self.peakTest5ResultText = ttk.Label(self.peakTest5ResultFrame, text='') + self.peakTest5Result = ttk.Label(self.peakTest5ResultFrame, text='') + + self.peakTest6Label = ttk.Label(self.results_peakInfoFrame, text='Standard Deviation of Peak Amplitude is low') + self.peakTest6ResultFrame = ttk.Frame(self.results_peakInfoFrame) + self.peakTest6ResultText = ttk.Label(self.peakTest6ResultFrame, text='') + self.peakTest6Result = ttk.Label(self.peakTest6ResultFrame, text='') + + self.totalPeakResult = ttk.Label(self.results_peakInfoFrame, text='') + + self.totalResult = ttk.Label(self.results_peakInfoFrame, text='') + + self.curveTitleLabel.grid(row=0, sticky='w', padx=5, pady=2.5) + self.curveTitleLabel.configure(font=("TkDefaultFont", 12, 'underline', 'bold')) + self.curveTest1Label.grid(row=1, sticky='w', padx=5, pady=2.5) + self.curveTest1ResultFrame.grid(row=2, sticky='ew', padx=5, pady=2.5) + self.curveTest1ResultFrame.columnconfigure(0, weight=1) + self.curveTest1ResultFrame.columnconfigure(1, weight=6) + self.curveTest1ResultText.grid(row=0, column=0, sticky='e', padx=5, pady=2.5) + self.curveTest1Result.grid(row=0, column=1, sticky='e', padx=5, pady=2.5) + + self.curveTest2Label.grid(row=3, sticky='w', padx=5, pady=2.5) + self.curveTest2ResultFrame.grid(row=4, sticky='ew', padx=5, pady=2.5) + self.curveTest2ResultFrame.columnconfigure(0, weight=1) + self.curveTest2ResultFrame.columnconfigure(1, weight=6) + self.curveTest2ResultText.grid(row=0, column=0, sticky='e', padx=5, pady=2.5) + self.curveTest2Result.grid(row=0, column=1, sticky='e', padx=5, pady=2.5) + + self.curveTest3Label.grid(row=5, sticky='w', padx=5, pady=2.5) + self.curveTest3ResultFrame.grid(row=6, sticky='ew', padx=5, pady=2.5) + self.curveTest3ResultFrame.columnconfigure(0, weight=1) + self.curveTest3ResultFrame.columnconfigure(1, weight=6) + self.curveTest3ResultText.grid(row=0, column=0, sticky='e', padx=5, pady=2.5) + self.curveTest3Result.grid(row=0, column=1, sticky='e', padx=5, pady=2.5) + + self.totalCurveResult.grid(row=7, sticky='e', padx=5, pady=10 ) + + ttk.Separator(self.results_peakInfoFrame).grid(row=8, sticky='ew', pady=5) + + self.peakTitleLabel.grid(row=9, sticky='w', padx=5, pady=2.5) + self.peakTitleLabel.configure(font=("TkDefaultFont", 12, 'underline', 'bold')) + + self.peakTest1Label.grid(row=11, sticky='w', padx=5, pady=2.5) + self.peakTest1ResultFrame.grid(row=12, sticky='ew', padx=5, pady=2.5) + self.peakTest1ResultFrame.columnconfigure(0, weight=1) + self.peakTest1ResultFrame.columnconfigure(1, weight=6) + self.peakTest1ResultText.grid(row=0, column=0, sticky='e', padx=5, pady=2.5) + self.peakTest1Result.grid(row=0, column=1, sticky='e', padx=5, pady=2.5) + + self.peakTest2Label.grid(row=13, sticky='w', padx=5, pady=2.5) + self.peakTest2ResultFrame.grid(row=14, sticky='ew', padx=5, pady=2.5) + self.peakTest2ResultFrame.columnconfigure(0, weight=1) + self.peakTest2ResultFrame.columnconfigure(1, weight=6) + self.peakTest2ResultText.grid(row=0, column=0, sticky='e', padx=5, pady=2.5) + self.peakTest2Result.grid(row=0, column=1, sticky='e', padx=5, pady=2.5) + + self.peakTest3Label.grid(row=15, sticky='w', padx=5, pady=2.5) + self.peakTest3ResultFrame.grid(row=16, sticky='ew', padx=5, pady=2.5) + 
self.peakTest3ResultFrame.columnconfigure(0, weight=1) + self.peakTest3ResultFrame.columnconfigure(1, weight=6) + self.peakTest3ResultText.grid(row=0, column=0, sticky='e', padx=5, pady=2.5) + self.peakTest3Result.grid(row=0, column=1, sticky='e', padx=5, pady=2.5) + + self.peakTest4Label.grid(row=17, sticky='w', padx=5, pady=2.5) + self.peakTest4ResultFrame.grid(row=18, sticky='ew', padx=5, pady=2.5) + self.peakTest4ResultFrame.columnconfigure(0, weight=1) + self.peakTest4ResultFrame.columnconfigure(1, weight=6) + self.peakTest4ResultText.grid(row=0, column=0, sticky='e', padx=5, pady=2.5) + self.peakTest4Result.grid(row=0, column=1, sticky='e', padx=5, pady=2.5) + + self.peakTest5Label.grid(row=19, sticky='w', padx=5, pady=2.5) + self.peakTest5ResultFrame.grid(row=20, sticky='ew', padx=5, pady=2.5) + self.peakTest5ResultFrame.columnconfigure(0, weight=1) + self.peakTest5ResultFrame.columnconfigure(1, weight=6) + self.peakTest5ResultText.grid(row=0, column=0, sticky='e', padx=5, pady=2.5) + self.peakTest5Result.grid(row=0, column=1, sticky='e', padx=5, pady=2.5) + + self.peakTest6Label.grid(row=21, sticky='w', padx=5, pady=2.5) + self.peakTest6ResultFrame.grid(row=22, sticky='ew', padx=5, pady=2.5) + self.peakTest6ResultFrame.columnconfigure(0, weight=1) + self.peakTest6ResultFrame.columnconfigure(1, weight=6) + self.peakTest6ResultText.grid(row=0, column=0, sticky='e', padx=5, pady=2.5) + self.peakTest6Result.grid(row=0, column=1, sticky='e', padx=5, pady=2.5) + + self.totalPeakResult.grid(row=23, sticky='e', padx=5, pady=10 ) + + ttk.Separator(self.results_peakInfoFrame).grid(row=24, sticky='ew', pady=5) + + self.totalResult.grid(row=25, sticky='e', padx=5, pady=10 ) + + self.resultsProgBar = ttk.Progressbar(self.results_peakInfoFrame, orient='horizontal') + self.resultsProgBar.grid(row=26, column=0, sticky='ew') + + #Export results + self.results_export_Frame = ttk.LabelFrame(self.results_tab, text="Export Results") + + ttk.Label(self.results_export_Frame, text="Export Figure").grid(row=0, column=0, sticky='ew', padx=5) + self.results_fig_dir = tk.StringVar() + self.results_fig_dir_entry = ttk.Entry(self.results_export_Frame, textvariable=self.results_fig_dir) + self.results_fig_dir_entry.grid(row=0, column=1, columnspan=5, sticky='ew') + + def filepath_results_fig(): + filepath = filedialog.asksaveasfilename(defaultextension='png', initialdir=pathlib.Path(self.data_path.get()).parent, initialfile=self.params['site']+'_results.png') + if filepath: + self.results_fig_dir_entry.delete(0, 'end') + self.results_fig_dir_entry.insert(0, filepath) + + def save_results_fig(): + if not self.save_ind_subplots.get(): + self.fig_results.savefig(self.results_fig_dir.get()) + else: + for key in self.ax_results.keys(): + extent = self.ax_results[key].get_tightbbox(self.fig_results.canvas.renderer).transformed(self.fig_results.dpi_scale_trans.inverted()) + self.fig_results.savefig(pathlib.Path(self.results_fig_dir.get()).parent.as_posix()+'/Subplot'+key+'.png', bbox_inches=extent) + + + self.browse_results_fig = ttk.Button(self.results_export_Frame, text="Browse",command=filepath_results_fig) + self.browse_results_fig.grid(row=0, column=7, sticky='ew', padx=2.5) + + self.save_results_fig = ttk.Button(self.results_export_Frame, text="Save",command=save_results_fig) + self.save_results_fig.grid(row=0, column=8, columnspan=2, sticky='ew', padx=2.5) + + #Save subplots individually + self.save_ind_subplots = tk.BooleanVar() + self.save_ind_subplots.set(False) + ttk.Checkbutton(self.results_export_Frame, 
text="Save ind. subplots", variable=self.save_ind_subplots).grid(row=0, column=10, sticky="ew", padx=5) + + #Export Peak Report + ttk.Label(self.results_export_Frame, text="Export Peak Report").grid(row=1, column=0, sticky='ew', padx=5) + self.results_report_dir = tk.StringVar() + self.results_report_dir_entry = ttk.Entry(self.results_export_Frame, textvariable=self.results_report_dir) + self.results_report_dir_entry.grid(row=1, column=1, columnspan=5, sticky='ew') + + def filepath_report_fig(): + filepath = filedialog.asksaveasfilename(defaultextension='csv', initialdir=pathlib.Path(self.data_path.get()).parent, initialfile=self.params['site']+'_peakReport.csv') + if filepath: + self.results_report_dir_entry.delete(0, 'end') + self.results_report_dir_entry.insert(0, filepath) + + def save_report_fig(): + sprit_hvsr.get_report(self.hvsr_results, format='plot', export=self.results_report_dir.get()) + + self.browse_results_fig = ttk.Button(self.results_export_Frame, text="Browse",command=filepath_report_fig) + self.browse_results_fig.grid(row=1, column=7, sticky='ew', padx=2.5) + + self.save_results_fig = ttk.Button(self.results_export_Frame, text="Save",command=save_report_fig) + self.save_results_fig.grid(row=1, column=8, columnspan=2, sticky='ew', padx=2.5) + self.results_export_Frame.columnconfigure(1, weight=1) + + self.results_siteSelectFrame.grid(row=0,column=0, columnspan=8, rowspan=2, sticky='ew') + self.results_peakInfoFrame.grid(row=0, column=9, columnspan=2, rowspan=8, sticky='nsew')#.pack(side='right', fill='both') + self.results_chartFrame.grid(row=2, column=0, columnspan=8, rowspan=6, sticky='nsew')#.pack(side='top', expand=True, fill='both') + self.results_export_Frame.grid(row=9, column=0, columnspan=11,rowspan=2,sticky='nsew')#.pack(side='bottom', fill='x') + self.results_tab.columnconfigure(0, weight=1) + self.results_tab.rowconfigure(2, weight=1) + + + # LOG TAB + self.log_tab = ttk.Frame(self.tab_control) + + from tkinter import scrolledtext + self.logFrame = ttk.LabelFrame(self.log_tab, text='Log') + self.logFrame.columnconfigure(0, weight=1) + self.logFrame.rowconfigure(0, weight=1) + + self.log_text = scrolledtext.ScrolledText(self.logFrame, wrap = tk.WORD)#, width=200, height=50) + self.log_text.configure(font=("Courier", 11)) + #text_area.grid(row=0, column=0, sticky='nsew') + self.log_text.grid(row=0, rowspan=10, column=0, sticky='nsew')#.pack(fill='both', expand=True) + + self.logProgBar = ttk.Progressbar(self.logFrame, orient='horizontal') + self.logProgBar.grid(row=11, column=0, sticky='nsew') + + introLogText = "Log of active session:\n" + self.log_text.insert('end', introLogText) + #log_text.configure(bg='black', fg='white') + + + self.logFrame.pack(fill='both', expand=True)#.pack(fill='both', expand=True, side='top', anchor='nw') + self.log_tab.pack(fill='both', expand=True, side='left', anchor='nw') + + # Add log tab to tab control + self.tab_control.add(self.log_tab, text="Log") + # Add result tab to tab control + self.tab_control.add(self.results_tab, text="Results".center(11, ' ').center(13,'|')) + + # Pack tab control + self.tab_control.pack(expand=True, fill="both")
+
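+#Note on plot strings: get_kindstr() above composes the plot_type string that sprit_hvsr.plot_hvsr() parses.
+#With the default checkbox states it evaluates to 'HVSR p ann c+ p ann Spec ann', where 'p' marks the best
+#peak, 'ann' annotates it, 'c+' draws the components on their own subplot, and '-s' (when present) suppresses
+#the standard deviation shading.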
+ + + +#Decorator that catches errors and warnings (to be modified later for the GUI) +
+[docs] +def catch_errors(func): + global spritApp + + #Define a local function to get a list of warnings that we'll use in the output + def get_warning_msg_list(w): + messageList = [] + #Collect warnings that happened before we got to the error + if w: + hasWarnings = True + for wi in w: + warning_category = type(wi.message).__name__.title().replace('warning','Warning') + #if w.line is None: + # w.line = linecache.getline(wi.filename, wi.lineno) + warning_lineNo = wi.lineno + warning_message = str(wi.message) + # append the warning category and message to messageList so we get all warnings + messageList.append(f'{warning_category} ({warning_lineNo}): {warning_message}') + return messageList + + # use functools.wraps to preserve the original function's metadata + @functools.wraps(func) + def wrapper(*args, **kwargs): + result = None + # use the global keyword to access the error_message and error_category variables + global error_message + global error_category + global spritApp + + messageList = [] + hasWarnings = False + # use a try-except block to catch any exceptions + #result = func(*args, **kwargs) + try: + # use a context manager to catch any warnings + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + # call the original function with the given arguments + result = func(*args, **kwargs) + + #Get message list, [] if no messages, doesn't run at all if Error/exception in func + messageList = get_warning_msg_list(w) + if messageList == []: + return result + else: + warningMessage = "WARNING:" + for msg in messageList: + warningMessage = warningMessage + "\n {}".format(msg) + + messagebox.showwarning(title='WARNINGS', message=warningMessage) + + except Exception as e: + messageList = get_warning_msg_list(w) + errorObj = sys.exc_info()[2] + + mainErrText = sys.exc_info()[1] + + mainErrTb = traceback.extract_tb(sys.exc_info()[2])[-1] + mainErrFilePath = pathlib.Path(mainErrTb[0]) + + mainErrFileName = mainErrFilePath.stem + mainErrLineNo = mainErrTb[1] + mainErrFunc = mainErrTb[2] + mainErrCodeLine = mainErrTb[3] + + errLineNo1 = str(traceback.extract_tb(sys.exc_info()[2])[-1].lineno) + error_category = type(e).__name__.title().replace('error', 'Error') + error_message = f"{e} ({errLineNo1})" + + #Get message list, [] if no messages, doesn't run at all if Error/exception in func + warningMessageList = get_warning_msg_list(w) + + #Build error messages + tbTuple0 = sys.exc_info()[0] + tbTuple1 = sys.exc_info()[1] + tbTuple2 = traceback.extract_tb(sys.exc_info()[2]) + + logMsg = f"**ERROR**\n{tbTuple0.__name__}: {tbTuple1}" + dialogErrMsg = logMsg.split(':')[1] + for tb in tbTuple2: + logMsg = logMsg + '\n\t' + logMsg = logMsg + f"{pathlib.Path(tb[0]).stem}.{tb[2]}(): {tb[3]} (Line {tb[1]})" + dialogErrMsg = dialogErrMsg + f"\n{pathlib.Path(tb[0]).stem}.{tb[2]}(), Line {tb[1]}" + logMsg = logMsg + '\n\n' + + #fullErrorMessage = f'ERROR {mainErrFileName}.{mainErrFunc} ({mainErrLineNo}): {mainErrText} \n\n {mainErrFileName} Line {mainErrLineNo}: {mainErrCodeLine}.' + if messageList == []: + pass + else: + dialogErrMsg = dialogErrMsg+"\n\n Additional Warnings along the way. See Log for more information." 
+ logMsg = logMsg + "\n\n\t *WARNING(S)*\n\tAdditional Warnings along the way:" + for addMsg in warningMessageList: + logMsg = logMsg+"\n\t\t{}".format(addMsg) + + + SPRIT_App.log_errorMsg(spritApp, logMsg) + + messagebox.showerror(title=f'ERROR ({error_category})', + message=dialogErrMsg) + update_progress_bars(100) + + # return the result of the function or the error/warning messages and categories + return result + # return the wrapper function + return wrapper
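+#Example usage of catch_errors (an illustrative sketch; do_processing is a hypothetical function):
+#
+#    @catch_errors
+#    def do_processing(hvsr_data):
+#        ...  # warnings raised here are collected and shown in a single messagebox;
+#             # an uncaught exception becomes an error dialog plus an entry in the Log tab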
+ + +
+[docs] +def on_closing(): + plt.close('all') + root.destroy() + sys.exit()
+ + +
+ +[docs] +def reboot_app(): + """Restarts the current program. + Note: this function does not return. Any cleanup action (like + saving data) must be done before calling this function.""" + python = sys.executable + os.execl(python, python, *sys.argv)
+ + +if __name__ == "__main__": + can_gui = sprit_utils.check_gui_requirements() + + if can_gui: + global root + root = tk.Tk() + try: + try: + icon_path = pathlib.Path(pkg_resources.resource_filename(__name__, 'resources/icon/sprit_icon_alpha.ico')) + root.iconbitmap(icon_path) + except Exception: + icon_path = pathlib.Path(pkg_resources.resource_filename(__name__, 'resources/icon/sprit_icon.png')) + root.iconphoto(False, tk.PhotoImage(file=icon_path.as_posix())) + except Exception: + print("ICON NOT LOADED, still opening GUI") + + root.resizable(True, True) + spritApp = SPRIT_App(root) + root.protocol("WM_DELETE_WINDOW", on_closing) + root.mainloop() + else: + print("GUI cannot be created") +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/_modules/sprit/sprit_utils.html b/docs/_modules/sprit/sprit_utils.html new file mode 100644 index 00000000..bca38c08 --- /dev/null +++ b/docs/_modules/sprit/sprit_utils.html @@ -0,0 +1,671 @@ + + + + + + sprit.sprit_utils — sprit 1.4 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for sprit.sprit_utils

+import datetime
+import functools
+import inspect
+import os
+import pathlib
+import subprocess
+import sys
+import traceback
+import warnings
+import zoneinfo
+
+import numpy as np
+from obspy.core.utcdatetime import UTCDateTime
+
+try:  # For distribution
+    from sprit import sprit_hvsr
+except Exception: #For testing
+    import sprit_hvsr
+    pass
+
+greek_chars = {'sigma': u'\u03C3', 'epsilon': u'\u03B5', 'teta': u'\u03B8'}
+channel_order = {'Z': 0, '1': 1, 'N': 1, '2': 2, 'E': 2}
+
+
+[docs] +def assert_check(var, cond=None, var_type=None, error_message='Output not valid', verbose=False): + if var_type is not None: + assert isinstance(var, var_type), error_message + if verbose: + print(f"Output valid: {var} is instance of {var_type}", end='') + + if cond is not None: + assert cond, error_message + if verbose: + if var_type is None: + print('Output valid:', end=' ') + else: + print(' and ', end='') + print("test condition is met.")
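+#Example (illustrative): the type check and the condition both pass, so nothing is raised
+# >>> assert_check(5, cond=5 > 0, var_type=int, error_message='expected a positive int')
+# >>> assert_check('5', var_type=int)  # raises AssertionError: Output not valid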
+ + + +
+[docs] +def check_gui_requirements(): + #First, check requirements + # Define a command that tries to open a window + command = "python -c \"import tkinter; tkinter.Tk()\"" + + # Run the command and get the exit code + exit_code = os.system(command) + + # Check if tkinter gui could be created + if exit_code == 0: + #Tkinter + oktoproceed=True + else: + oktoproceed=False + print("GUI window cannot be created.") + + return oktoproceed
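+#Example (illustrative): guard GUI startup on the result, as the tkinter UI does
+# >>> if check_gui_requirements():
+# ...     pass  # safe to create tkinter windows here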
+ + + #if sys.platform == 'linux': + # # Check if qtwayland5 is installed + # output = subprocess.run(["dpkg", "-s", "qtwayland5"], capture_output=True, text=True) + # if "Status: install ok installed" in output.stdout: + # print("qtwayland5 is already installed") + # else: + # print("qtwayland5 is not installed") + # # Install qtwayland5 + # os.system("sudo apt install qtwayland5") + +#Get check mark +
+[docs] +def check_mark(incolor=False, interminal=False): + """The default Windows terminal cannot display the check mark character correctly. + This function returns another displayable character if the platform is Windows""" + if incolor: + try: + check = get_char(u'\u2705') + except Exception: + check = get_char(u'\u2714') + else: + check = get_char(u'\u2714') + + if sys.platform == 'win32' and interminal: + check = get_char(u'\u039E') + return check
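+#Example (illustrative): outside a Windows terminal, the check mark itself is returned
+# >>> check_mark()
+# '✔'
+# >>> check_mark(incolor=True)
+# '✅'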
+ + +#Converts filepaths to pathlib paths, if not already +
+[docs] +def checkifpath(filepath, sample_list='', verbose=False, raise_error=False): + """Support function that checks whether filepath is a pathlib.Path object and tries to convert it if not + + Parameters + ---------- + filepath : str or pathlib.Path, or anything + Filepath to check. If it is not a valid filepath, it is not converted and an error is raised + + Returns + ------- + filepath : pathlib.Path + pathlib.Path of filepath + """ + if sample_list == '': + sample_list = ['1', '2', '3', '4', '5', '6', 'sample', 'batch', 'sample_batch'] + for s in range(1, 7): + sample_list.append(f"sample{s}") + sample_list.append(f"sample_{s}") + + # checks if the variable is any instance of pathlib + if isinstance(filepath, pathlib.PurePath): + pass + elif str(filepath) in sample_list: + pass + else: + try: + filepath = pathlib.Path(filepath) + except Exception: + if verbose: + warnings.warn('Filepath cannot be converted to pathlib path: {}'.format(filepath)) + if not filepath.exists(): + raise RuntimeError('File does not exist: {}'.format(filepath)) + return filepath
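+#Example (illustrative; 'data/site1.mseed' is a hypothetical path):
+# >>> checkifpath('data/site1.mseed')    # PosixPath('data/site1.mseed') if the file exists
+# >>> checkifpath('data/missing.mseed')  # raises RuntimeError: File does not exist: ...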
+ + +#Check to make sure the number of time-steps is the same for each channel +
+[docs] +def check_tsteps(hvsr_data): + """Check time steps of PPSDs to make sure they are all the same length""" + ppsds = hvsr_data['ppsds'] + tSteps = [] + for k in ppsds.keys(): + tSteps.append(np.array(ppsds[k]['psd_values']).shape[0]) + if len(set(tSteps)) <= 1: + pass #This means all channels have the same number of time steps + minTStep = tSteps[0] + else: + print('There are different numbers of time steps used to calculate the HVSR curves. \n This may result in computational errors. Trimming to the shortest.') + minTStep = min(tSteps) + return minTStep
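+#Example (illustrative): three channels, each with 10 time windows of 50 PSD values
+# >>> stub = {'psd_values': np.zeros((10, 50))}
+# >>> check_tsteps({'ppsds': {'Z': stub, 'E': stub, 'N': stub}})
+# 10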
+ + +#Check the x-values for each channel, to make sure they are all the same length +
+[docs] +def check_xvalues(ppsds): + """Check x_values of PPSDs to make sure they are all the same length""" + xLengths = [] + for k in ppsds.keys(): + xLengths.append(len(ppsds[k]['period_bin_centers'])) + if len(set(xLengths)) <= 1: + pass #This means all channels have same number of period_bin_centers + else: + print('X-values (periods or frequencies) do not have the same values. \n This may result in computational errors.') + #Do stuff to fix it? + return ppsds
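+#Example (illustrative): all channels share 50 period bins, so the input is returned unchanged
+# >>> ppsds_stub = {k: {'period_bin_centers': list(range(50))} for k in 'ZEN'}
+# >>> _ = check_xvalues(ppsds_stub)  # prints a message only when the lengths differ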
+ + +#Formats time into desired output +
+[docs] +def format_time(inputDT, tzone='UTC'): + """Private function to format time, used in other functions + + Formats input time to datetime objects in utc + + Parameters + ---------- + inputDT : str or datetime obj + Input datetime. Can include date and time, just date (time inferred to be 00:00:00.00) or just time (if so, date is set as today) + tzone : str='utc' or int {'utc', 'local'} + Timezone of data entry. + If string and not utc, assumed to be timezone of computer running the process. + If int, assumed to be offset from UTC (e.g., CST in the United States is -6; CDT in the United States is -5) + + Returns + ------- + outputTimeObj : datetime object in UTC + Output datetime.datetime object, now in UTC time. + + """ + # Initialize values + year = 2000 + month = 1 + day = 1 + + # Parse whether inputDT has date or not + if isinstance(inputDT, str): + #tzone = 'America/Chicago' + #Format string to datetime obj + div = '-' + timeDiv = 'T' + if "/" in inputDT: + div = '/' + hasDate = True + elif '-' in inputDT: + div = '-' + hasDate = True + else: + hasDate= False + year = datetime.datetime.today().year + month = datetime.datetime.today().month + day = datetime.datetime.today().day + + if ':' in inputDT: + hasTime = True + if 'T' in inputDT: + timeDiv = 'T' + else: + timeDiv = ' ' + else: + hasTime = False + + if hasDate: + #If first number is 4-dig year (assumes yyyy-dd-mm is not possible) + if len(inputDT.split(div)[0])>2: + year = inputDT.split(div)[0] + month = inputDT.split(div)[1] + day = inputDT.split(div)[2].split(timeDiv)[0] + + #If last number is 4-dig year + elif len(inputDT.split(div)[2].split(timeDiv)[0])>2: + #..and first number is day + if int(inputDT.split(div)[0])>12: + #dateStr = '%d'+div+'%m'+div+'%Y' + year = inputDT.split(div)[2].split(timeDiv)[0] + month = inputDT.split(div)[1] + day = inputDT.split(div)[0] + #...and first number is month (like American style) + else: + year = inputDT.split(div)[2].split(timeDiv)[0] + month = inputDT.split(div)[0] + day = inputDT.split(div)[1] + + #Another way to catch if first number is (2-digit) year + elif int(inputDT.split(div)[0])>31: + #dateStr = '%y'+div+'%m'+div+'%d' + year = inputDT.split(div)[0] + #Assumes anything less than current year is from this century + if year < datetime.datetime.today().year: + year = '20'+year + else:#...and anything more than current year is from last century + year = '19'+year + #assumes day will always come last in this instance, as above + month = inputDT.split(div)[1] + day = inputDT.split(div)[2].split(timeDiv)[0] + + #If last digit is (2 digit) year + elif int(inputDT.split(div)[2].split(timeDiv)[0])>31: + #...and first digit is day + if int(inputDT.split(div)[0])>12: + #dateStr = '%d'+div+'%m'+div+'%y' + year = inputDT.split(div)[2].split(timeDiv)[0] + if year < datetime.datetime.today().year: + year = '20'+year + else: + year = '19'+year + month = inputDT.split(div)[1] + day = inputDT.split(div)[0] + else: #...and second digit is day + #dateStr = '%m'+div+'%d'+div+'%y' + year = inputDT.split(div)[2].split(timeDiv)[0] + if year < datetime.datetime.today().year: + year = '20'+year + else: + year = '19'+year + month = inputDT.split(div)[0] + day = inputDT.split(div)[1] + + hour = 0 + minute = 0 + sec = 0 + microS = 0 + if hasTime: + if hasDate: + timeStr = inputDT.split(timeDiv)[1] + else: + timeStr = inputDT + + if 'T' in timeStr: + timeStr=timeStr.split('T')[1] + elif ' ' in timeStr: + timeStr=timeStr.split(' ')[1] + + timeStrList = timeStr.split(':') + if len(timeStrList[0])>2: + 
timeStrList[0] = timeStrList[0][-2:] + elif int(timeStrList[0]) > 23: + timeStrList[0] = timeStrList[0][-1:] + + if len(timeStrList) == 3: + if '.' in timeStrList[2]: + microS = int(timeStrList[2].split('.')[1]) + timeStrList[2] = timeStrList[2].split('.')[0] + elif len(timeStrList) == 2: + timeStrList.append('00') + + hour = int(timeStrList[0]) + minute = int(timeStrList[1]) + sec = int(timeStrList[2]) + + outputTimeObj = datetime.datetime(year=int(year),month=int(month), day=int(day), + hour=int(hour), minute=int(minute), second=int(sec), microsecond=int(microS)) + elif isinstance(inputDT, (datetime.datetime, datetime.time)): + outputTimeObj = inputDT + elif isinstance(inputDT, UTCDateTime): + outputTimeObj = inputDT.datetime + + # Add timezone info + availableTimezones = list(map(str.lower, zoneinfo.available_timezones())) + if outputTimeObj.tzinfo is not None and outputTimeObj.tzinfo.utcoffset(outputTimeObj) is not None: + # This is already timezone aware + pass + elif type(tzone) is int: + outputTimeObj = outputTimeObj-datetime.timedelta(hours=tzone) + elif type(tzone) is str: + if tzone.lower() in availableTimezones: + outputTimeObj = outputTimeObj.replace(tzinfo=zoneinfo.ZoneInfo(tzone)) + else: + raise ValueError("Timezone {} is not in official list. \nAvailable timezones:\n{}".format(tzone, availableTimezones)) + elif isinstance(tzone, zoneinfo.ZoneInfo): + outputTimeObj = outputTimeObj.replace(tzinfo=tzone) + else: + raise ValueError("Timezone must be either str, int, or zoneinfo.ZoneInfo object") + + #Convert to UTC + outputTimeObj = outputTimeObj.astimezone(datetime.timezone.utc) + + return outputTimeObj
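+#Examples (illustrative): timestamps are normalized to timezone-aware UTC datetimes
+# >>> format_time('2023-06-15T13:30:00', tzone='UTC')
+# datetime.datetime(2023, 6, 15, 13, 30, tzinfo=datetime.timezone.utc)
+# >>> format_time('2023-06-15 13:30:00', tzone='America/Chicago')  # CDT = UTC-5
+# datetime.datetime(2023, 6, 15, 18, 30, tzinfo=datetime.timezone.utc)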
+ + +#Get character for printing +
+[docs] +def get_char(in_char): + """Outputs character with proper encoding/decoding""" + if in_char in greek_chars.keys(): + out_char = greek_chars[in_char].encode(encoding='utf-8') + else: + out_char = in_char.encode(encoding='utf-8') + return out_char.decode('utf-8')
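+#Example (illustrative): names in greek_chars map to their glyphs; other strings round-trip
+# >>> get_char('sigma')
+# 'σ'
+# >>> get_char('OK')
+# 'OK'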
+ + + +# Get fuller traceback information on errors +def _get_error_from_exception(exception=None): + if exception is not None: + traceback.print_exception(sys.exc_info()[1]) + exc_type, exc_obj, tb = sys.exc_info() + f = tb.tb_frame + lineno = tb.tb_lineno + filename = f.f_code.co_filename + errLineNo = str(traceback.extract_tb(sys.exc_info()[2])[-1].lineno) + error_category = type(exception).__name__.title().replace('error', 'Error') + error_message = f"{exception} ({errLineNo})" + print(f"{error_category} ({errLineNo}): {error_message}") + print(lineno, filename, f) + + +#Check that input stream has Z, E, N channels +
+[docs] +def has_required_channels(stream): + channel_set = set() + + # Extract the channel codes from the traces in the stream + for trace in stream: + channel_set.add(trace.stats.channel) + + # Check if Z, E, and N channels are present + return {'Z', 'E', 'N'}.issubset(channel_set)
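+#Example (illustrative): the check matches bare channel codes, so the traces must be labeled
+#'Z', 'E', and 'N' exactly (a full SEED code like 'EHZ' would not satisfy 'Z' as written)
+# >>> from obspy import Stream, Trace
+# >>> st = Stream([Trace(header={'channel': c}) for c in ('Z', 'E', 'N')])
+# >>> has_required_channels(st)
+# True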
+ + +#Make input data (dict) into sprit_hvsr class +
+[docs] +def make_it_classy(input_data, verbose=False): + if isinstance(input_data, (sprit_hvsr.HVSRData, sprit_hvsr.HVSRBatch)): + for k, v in input_data.items(): + if k=='input_params': + for kin in input_data['input_params'].keys(): + if kin not in input_data.keys(): + input_data[kin] = input_data['input_params'][kin] + if k=='params': + for kin in input_data['params'].keys(): + if kin not in input_data.keys(): + input_data[kin] = input_data['params'][kin] + output_class = input_data + else: + output_class = sprit_hvsr.HVSRData(input_data) + if verbose: + print('Made it classy | {} --> {}'.format(type(input_data), type(output_class))) + return output_class
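+#Example (illustrative): a plain dict is wrapped into an HVSRData object
+# >>> hv = make_it_classy({'site': 'TestSite'}, verbose=True)  # prints, e.g.:
+# Made it classy | <class 'dict'> --> <class 'sprit.sprit_hvsr.HVSRData'>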
+
+
+#Read data directly from Raspberry Shake
+
+[docs]
+def read_from_RS(dest, src='SHAKENAME@HOSTNAME:/opt/data/archive/YEAR/AM/STATION/', opts='az', username='myshake', password='shakeme', hostname='rs.local', year='2023', sta='RAC84', sleep_time=0.1, verbose=True, save_progress=True, method='scp'):
+    src = src.replace('SHAKENAME', username)
+    src = src.replace('HOSTNAME', hostname)
+    src = src.replace('YEAR', year)
+    src = src.replace('STATION', sta)
+
+    if method == 'scp':
+        """This does not work from within a virtual environment!!!!"""
+        import wexpect
+
+        scp_command = 'scp -r {} "{}"'.format(src, dest)
+
+        print('Command:', scp_command)
+        child = wexpect.spawn(scp_command, timeout=5)
+
+        child.expect("password:")
+        child.sendline(password)
+
+        child.expect(wexpect.EOF)
+
+        print("Files have been successfully transferred to {}!".format(dest))
+    elif method == 'rsync':
+        if verbose:
+            opts = opts + 'v'
+        if save_progress:
+            opts = opts + 'p'
+
+        # From https://stackoverflow.com/questions/13041732/ssh-password-through-python-subprocess
+        import pty
+        command = [
+            'rsync',
+            "-" + opts,
+            src,
+            dest
+            ]
+
+        # PID = 0 for child, and the PID of the child for the parent
+        pid, child_fd = pty.fork()
+
+        if not pid:  # Child process
+            # Replace child process with the rsync process (execvp searches PATH)
+            os.execvp(command[0], command)
+
+        while True:
+            output = os.read(child_fd, 1024).strip().decode()
+            lower = output.lower()
+            # Write the password (os.write expects bytes)
+            if lower.endswith('password:'):
+                os.write(child_fd, (password + '\n').encode())
+                break
+            elif 'are you sure you want to continue connecting' in lower:
+                # Adding key to known_hosts
+                os.write(child_fd, b'yes\n')
+            elif 'company privacy warning' in lower:
+                pass  # This is an understood message
+            else:
+                print("SSH Connection Failed",
+                      "Encountered unrecognized message when spawning "
+                      "the SSH tunnel: '{0}'".format(output))
+
+    return dest
+
+
+def _run_docstring():
+    """This function updates the docstring of the sprit.run() function, for documentation or help(sprit.run) purposes
+
+    Returns
+    -------
+    str
+        Updated docstring with more specifics about functions and their parameters
+    """
+    nl = '\n\t'
+    dsIntro = sprit_hvsr.run.__doc__.split('Parameters')[0]
+    dsParameters = ('Parameters' + sprit_hvsr.run.__doc__.split('Parameters')[1].split('Returns')[0])
+    dsReturns = ('    Returns' + sprit_hvsr.run.__doc__.split('Returns')[1])
+
+    functionList = [sprit_hvsr.input_params, sprit_hvsr.fetch_data, sprit_hvsr.calculate_azimuth,
+                    sprit_hvsr.remove_noise, sprit_hvsr.generate_ppsds, sprit_hvsr.process_hvsr,
+                    sprit_hvsr.remove_outlier_curves, sprit_hvsr.check_peaks,
+                    sprit_hvsr.get_report, sprit_hvsr.export_data]
+
+    funcStrList = []
+    funcParams = []
+    funcDefaults = []
+    prevOutputList = ['params', 'hvsr_data', 'hvsr_results']
+    requiredList = []
+    for func in functionList:
+        parameters = inspect.signature(func).parameters
+        defaults = [param.default for param in list(zip(*parameters.items()))[1]]
+        parameters = list(zip(*parameters.items()))[0]
+
+        for i, d in enumerate(defaults):
+            if 'kwargs' in parameters[i]:
+                defaults[i] = {}
+            elif d is inspect._empty:
+                if any(o in parameters[i] for o in prevOutputList):
+                    defaults[i] = '<output of previous function>'
+                else:
+                    defaults[i] = '<no default>'
+
+        funcDS = func.__doc__.split('\n')[0][:100]
+        baseURL = r"https://sprit.readthedocs.io/en/latest/sprit.html#sprit."
+        funcURL = baseURL + func.__name__
+        firstLine = f"\n {func.__name__} : function name (not an actual parameter) \n\t{funcDS}\n\tSee API documentation: [{func.__name__}()]({funcURL})"
+        followingLines = ''
+        for i, param in enumerate(parameters):
+            followingLines += f"\n {param}"  #.ljust(25)
+            if isinstance(defaults[i], str) and defaults[i] != '<positional, no default>':
+                followingLines += f": any, default = '{defaults[i]}'\n\tSee API documentation at link above or at `help(sprit.{func.__name__})` for specifics."
+            else:
+                followingLines += f": any, default = {defaults[i]}\n\tSee API documentation at link above or at `help({func.__name__})` for specifics."
+
+        #funcDefaults.append(['<positional, no default>' if d is inspect._empty else d for d in defaults])
+        #funcParams.append(list(zip(*parameters.items()))[0])
+
+        funcString = firstLine + followingLines
+        funcStrList.append(funcString)
+
+    run_docstring = dsIntro + dsParameters + f"{nl.join(funcStrList)}\n\n" + dsReturns
+    return run_docstring
+
+
+#Time functions, for timing how long a process takes
+
+[docs]
+def time_it(_t, proc_name='', verbose=True):
+    """Computes elapsed time since the last call."""
+    # Use full datetimes (not .time()) so subtraction yields a timedelta
+    t1 = datetime.datetime.now()
+    dt = (t1 - _t).total_seconds()
+    t = _t
+    if dt > 0.05:
+        if verbose:
+            print(f'[ELAPSED TIME] {dt:0.1f} s', flush=True)
+        t = t1
+    return t
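Usage might look like this (a sketch; assumes the datetime-based reference time above):

import datetime

t0 = datetime.datetime.now()
# ... some processing step ...
t0 = time_it(t0, proc_name='processing', verbose=True)  # prints elapsed time if > 0.05 s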
+
+
+#Get x mark (for negative test results)
+
+[docs]
+def x_mark(incolor=False, inTerminal=False):
+    """The default Windows terminal is not able to display the x mark character correctly.
+    This function returns another displayable character if platform is Windows"""
+
+    if incolor:
+        try:
+            xmark = get_char(u'\u274C')
+        except Exception:
+            xmark = get_char(u'\u2718')
+    else:
+        xmark = get_char(u'\u2718')
+    return xmark
+ +
+ +
+
+ +
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/index.rst b/docs/_sources/index.rst.txt
similarity index 100%
rename from docs/index.rst
rename to docs/_sources/index.rst.txt
diff --git a/docs/sprit.rst b/docs/_sources/sprit.rst.txt
similarity index 100%
rename from docs/sprit.rst
rename to docs/_sources/sprit.rst.txt
diff --git a/docs/sprit.sprit_cli.rst b/docs/_sources/sprit.sprit_cli.rst.txt
similarity index 100%
rename from docs/sprit.sprit_cli.rst
rename to docs/_sources/sprit.sprit_cli.rst.txt
diff --git a/docs/sprit.sprit_hvsr.rst b/docs/_sources/sprit.sprit_hvsr.rst.txt
similarity index 100%
rename from docs/sprit.sprit_hvsr.rst
rename to docs/_sources/sprit.sprit_hvsr.rst.txt
diff --git a/docs/sprit.sprit_jupyter_UI.rst b/docs/_sources/sprit.sprit_jupyter_UI.rst.txt
similarity index 100%
rename from docs/sprit.sprit_jupyter_UI.rst
rename to docs/_sources/sprit.sprit_jupyter_UI.rst.txt
diff --git a/docs/sprit.sprit_plot.rst b/docs/_sources/sprit.sprit_plot.rst.txt
similarity index 100%
rename from docs/sprit.sprit_plot.rst
rename to docs/_sources/sprit.sprit_plot.rst.txt
diff --git a/docs/sprit.sprit_streamlit_ui.rst b/docs/_sources/sprit.sprit_streamlit_ui.rst.txt
similarity index 100%
rename from docs/sprit.sprit_streamlit_ui.rst
rename to docs/_sources/sprit.sprit_streamlit_ui.rst.txt
diff --git a/docs/sprit.sprit_tkinter_ui.rst b/docs/_sources/sprit.sprit_tkinter_ui.rst.txt
similarity index 100%
rename from docs/sprit.sprit_tkinter_ui.rst
rename to docs/_sources/sprit.sprit_tkinter_ui.rst.txt
diff --git a/docs/sprit.sprit_utils.rst b/docs/_sources/sprit.sprit_utils.rst.txt
similarity index 100%
rename from docs/sprit.sprit_utils.rst
rename to docs/_sources/sprit.sprit_utils.rst.txt
diff --git a/docs/_build/doctrees/environment.pickle b/docs/environment.pickle
similarity index 97%
rename from docs/_build/doctrees/environment.pickle
rename to docs/environment.pickle
index b4c6413ac8882f3f0d3eaab6b3b38ad506be0a59..65a95f8dfbc5c22465cddce1ee44bac2fbfdc944 100644
GIT binary patch
delta 7505
[base85-encoded binary delta omitted]
delta 7579
[base85-encoded binary delta omitted]
diff --git a/docs/genindex.html b/docs/genindex.html
index 4d6910ef..23c7f332 100644
--- a/docs/genindex.html
+++ b/docs/genindex.html
@@ -4,18 +4,18 @@
   Index — sprit 1.4 documentation
[HTML asset link/script tag changes omitted]
diff --git a/docs/_build/doctrees/index.doctree b/docs/index.doctree
similarity index 100%
rename from docs/_build/doctrees/index.doctree
rename to docs/index.doctree
diff --git a/docs/index.html b/docs/index.html
index 2601f438..b13233c7 100644
--- a/docs/index.html
+++ b/docs/index.html
@@ -5,18 +5,18 @@
   Welcome to the documentation for SpRIT! — sprit 1.4 documentation
[HTML asset link/script tag changes omitted]
diff --git a/docs/main.html b/docs/main.html
index 00b9e31e..6a35a8bc 100644
--- a/docs/main.html
+++ b/docs/main.html
@@ -2,18 +2,21 @@
   sprit API documentation
[HTML asset link/script tag changes omitted]
@@ -23,120 +26,6 @@

Package sprit

This module analyzes ambient seismic data using the Horizontal to Vertical Spectral Ratio (HVSR) technique

-
- -Expand source code - -
#__init__.py
-"""
-This module analyzes ambient seismic data using the Horizontal to Vertical Spectral Ratio (HVSR) technique
-"""
-try:
-    import sprit.sprit_utils as sprit_utils
-    import sprit.sprit_gui as sprit_gui
-    import sprit.sprit_hvsr as sprit_hvsr
-    import sprit.sprit_jupyter_UI as sprit_jupyter_UI
-except:
-    import sprit_utils
-    import sprit_gui
-    import sprit_hvsr
-    import sprit_jupyter_UI
-
-from sprit.sprit_hvsr import(
-    run,
-    azimuth,
-    export_data,
-    export_settings,
-    import_data,
-    import_settings,
-    input_params,
-    gui,
-    get_metadata,
-    fetch_data,
-    batch_data_read,
-    generate_ppsds,
-    process_hvsr,
-    plot_hvsr,
-    read_tromino_files,
-    remove_noise,
-    remove_outlier_curves,
-    check_peaks,
-    get_report,
-    HVSRData,
-    HVSRBatch,
-)
-
-from sprit.sprit_utils import(
-    check_gui_requirements,
-    checkifpath,
-    check_mark,
-    check_tsteps,
-    check_xvalues,
-    format_time,
-    get_char,
-    has_required_channels,
-    make_it_classy,
-    read_from_RS,
-    time_it,
-    x_mark
-)
-
-from sprit.sprit_gui import(
-    catch_errors
-)
-
-from sprit.sprit_jupyter_UI import(
-    create_jupyter_ui
-    )
-
-__all__ =('sprit_hvsr',
-            'run',
-            'azimuth',
-            'check_mark',
-            'get_char',
-            'time_it',
-            'checkifpath',
-            'export_data',
-            'export_settings',
-            'import_data',
-            'import_settings',
-            'input_params',
-            'gui',
-            'get_metadata',
-            'has_required_channels',
-            'fetch_data',
-            'batch_data_read',
-            'generate_ppsds',
-            'process_hvsr',
-            'plot_hvsr',
-            'read_tromino_files',
-            'remove_noise',
-            'remove_outlier_curves',
-            'check_peaks',
-            'get_report',
-            'HVSRData',
-            'HVSRBatch',
-        'sprit_utils',
-            'check_gui_requirements',
-            'checkifpath',
-            'check_mark',
-            'check_tsteps',
-            'check_xvalues',
-            'format_time',
-            'get_char',
-            'has_required_channels',
-            'make_it_classy',
-            'read_from_RS',
-            'time_it',
-            'x_mark',
-        'sprit_gui',
-            'catch_errors',
-        'sprit_jupyter_UI',
-            'create_jupyter_ui'
-            )
-
-__author__ = 'Riley Balikian'
-

Sub-modules

@@ -145,10 +34,6 @@

Sub-modules

This module/script is used to run sprit from the command line …

-
sprit.sprit_gui
-
-

This script contains all the functions, classes, etc. to create a tkinter app for graphical user interface.

-
sprit.sprit_hvsr

This module is the main SpRIT module that contains all the functions needed to run HVSR analysis …

@@ -157,6 +42,18 @@

Sub-modules

Functions to create jupyter notebook widget UI

+
sprit.sprit_plot
+
+
+
+
sprit.sprit_streamlit_ui
+
+
+
+
sprit.sprit_tkinter_ui
+
+

This script contains all the functions, classes, etc. to create a tkinter app for a graphical user interface.

+
sprit.sprit_utils
@@ -168,8 +65,47 @@

Sub-modules

Functions

-
-def azimuth(hvsr_data, azimuth_angle=10, azimuth_type='multiple', azimuth_unit='degrees', show_az_plot=False, verbose=False) +
+def assert_check(var, cond=None, var_type=None, error_message='Output not valid', verbose=False) +
+
+
+
+
+def batch_data_read(batch_data, batch_type='table', param_col=None, batch_params=None, verbose=False, **readcsv_getMeta_fetch_kwargs) +
+
+

Function to read in data as a batch of multiple data files. This is best used through sprit.fetch_data(*args, source='batch', **other_kwargs).

+

Parameters

+
+
batch_data : filepath or list
+
Input data information for how to read in data as a batch. Can be a filepath or a list of filepaths/stream objects.
+If a filepath, it should point to a .csv (or similar file that can be read by pandas.read_csv()) with batch data information.
+
batch_type : str, optional
+
Type of batch read, only 'table' and 'filelist' accepted. +If 'table', will read data from a file read in using pandas.read_csv(), by default 'table'
+
param_col : None or str, optional
+
Name of parameter column from batch information file. Only used if a batch_type='table' and single parameter column is used, rather than one column per parameter (for single parameter column, parameters are formatted with = between keys/values and , between item pairs), by default None
+
batch_params : list, dict, or None, default = None
+
Parameters to be used if batch_type='filelist'. If it is a list, it needs to be the same length as batch_data. If it is a dict, it will be applied to all files in batch_data and will be combined with extra keyword arguments caught by **readcsv_getMeta_fetch_kwargs.
+
verbose : bool, optional
+
Whether to print information to terminal during batch read, by default False
+
**readcsv_getMeta_fetch_kwargs
+
Keyword arguments that will be read into pandas.read_csv(), sprit.input_params, sprit.get_metadata(), and/or sprit.fetch_data()
+
+

Returns

+
+
hvsrBatch
+
HVSRBatch object with each item representing a different HVSRData object
+
+

Raises

+
+
IndexError
+
Raised if param_col is not a column in the batch information table.
+
+
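For illustration, a minimal table-style batch read (the CSV name and its columns are hypothetical; HVSRBatch is assumed to support dict-style iteration, as its use elsewhere in this module suggests):

import sprit

# Hypothetical batch file: one row per site, one column per parameter
# (e.g., site, datapath, acq_date, ...)
batch_out = sprit.batch_data_read('hvsr_batch_info.csv', batch_type='table', verbose=True)
for site_name, site_data in batch_out.items():
    print(site_name, type(site_data))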
+
+def calculate_azimuth(hvsr_data, azimuth_angle=30, azimuth_type='multiple', azimuth_unit='degrees', show_az_plot=False, verbose=False, **plot_azimuth_kwargs)

Function to calculate azimuthal horizontal component at specified angle(s). Adds each new horizontal component as a radial component to obspy.Stream object at hvsr_data['stream']

@@ -199,521 +135,18 @@

Returns

Updated HVSRData object specified in hvsr_data with hvsr_data['stream'] attribute containing additional components (EHR-***), with *** being zero-padded (3 digits) azimuth angle in degrees.
-
- -Expand source code - -
def azimuth(hvsr_data, azimuth_angle=10, azimuth_type='multiple', azimuth_unit='degrees', show_az_plot=False, verbose=False):
-    """Function to calculate azimuthal horizontal component at specified angle(s). Adds each new horizontal component as a radial component to obspy.Stream object at hvsr_data['stream']
-
-    Parameters
-    ----------
-    hvsr_data : HVSRData
-        Input HVSR data
-    azimuth_angle : int, default=10
-        If `azimuth_type='multiple'`, this is the angular step (in unit `azimuth_unit`) of each of the azimuthal measurements.
-        If `azimuth_type='single'` this is the angle (in unit `azimuth_unit`) of the single calculated azimuthal measurement. By default 10.
-    azimuth_type : str, default='multiple'
-        What type of azimuthal measurement to make, by default 'multiple'.
-        If 'multiple' (or {'multi', 'mult', 'm'}), will take a measurement at each angular step of azimuth_angle of unit azimuth_unit.
-        If 'single' (or {'sing', 's'}), will take a single azimuthal measurement at angle specified in azimuth_angle.
-    azimuth_unit : str, default='degrees'
-        Angular unit used to specify `azimuth_angle` parameter. By default 'degrees'.
-        If 'degrees' (or {'deg', 'd'}), will use degrees.
-        If 'radians' (or {'rad', 'r'}), will use radians.
-    show_az_plot : bool, default=False
-        Whether to show azimuthal plot, by default False.
-    verbose : bool, default=False
-        Whether to print terminal output, by default False
-
-    Returns
-    -------
-    HVSRData
-        Updated HVSRData object specified in hvsr_data with hvsr_data['stream'] attribute containing additional components (EHR-***),
-        with *** being zero-padded (3 digits) azimuth angle in degrees.
-    """
-          
-    degList = ['degrees', 'deg', 'd']
-    radList = ['radians', 'rad', 'r']
-    if azimuth_unit.lower() in degList:
-        az_angle_rad = np.deg2rad(azimuth_angle)
-        az_angle_deg = azimuth_angle
-    elif azimuth_unit.lower() in radList:
-        az_angle_rad = azimuth_angle
-        az_angle_deg = np.rad2deg(azimuth_angle)
-    else:
-        warnings.warn(f"azimuth_unit={azimuth_unit} not supported. Try 'degrees' or 'radians'. No azimuthal analysis run.")
-        return hvsr_data
-    
-    #Limit to 
-    if az_angle_deg <= 1:
-        if verbose:
-            warnings.warn(f"Minimum azimuth rotation is 1 degree (max. is 180). You have selected {az_angle_deg} degrees ({az_angle_rad} radians). Converting to azimuth_angle=1 degree ({np.round(np.pi/180,3)} radians) ")
-        az_angle_deg = 1
-        az_angle_rad = np.pi/180
-    elif az_angle_deg >= 180:
-        if verbose:
-            warnings.warn(f"Maximum azimuth value is azimuth_angle=180 degrees (min. is 1). You have selected {az_angle_deg} degrees ({az_angle_rad} radians). Converting to azimuth_angle=180 degrees ({np.round(np.pi,3)} radians) ")
-        az_angle_deg = 180
-        az_angle_rad = np.pi
-
-    multAzList = ['multiple', 'multi', 'mult', 'm']
-    singleAzList = ['single', 'sing', 's']
-    if azimuth_type.lower() in multAzList:
-        azimuth_list = list(np.arange(0, np.pi, az_angle_rad))
-        azimuth_list_deg = list(np.arange(0, 180, az_angle_deg))
-    elif azimuth_type.lower() in singleAzList:
-        azimuth_list = [az_angle_rad]
-        azimuth_list_deg = [az_angle_deg]
-    else:
-        warnings.warn(f"azimuth_type={azimuth_type} not supported. Try 'multiple' or 'single'. No azimuthal analysis run.")
-        return hvsr_data
-
-    eComp = hvsr_data['stream'].select(component='E').merge()
-    nComp = hvsr_data['stream'].select(component='N').merge()
-
-    statsDict = {}
-    for key, value in eComp[0].stats.items():
-        statsDict[key] = value
-    
-    for i, az in enumerate(azimuth_list):
-        az_rad = az
-        az_deg = azimuth_list_deg[i]
-        statsDict['channel'] = f"EHR-{str(round(az_deg,0)).zfill(3)}" #Change channel name
-        statsDict['azimuth_deg'] = az_rad
-        statsDict['azimuth_rad'] = az_deg
-        
-        hasMask = [False, False]
-        if np.ma.is_masked(nComp[0].data):
-            nData = nComp[0].data.data
-            nMask = nComp[0].data.mask
-            hasMask[0] = True
-        else:
-            nData = nComp[0].data        
-            nMask = [True] * len(nData)
-        
-        if np.ma.is_masked(eComp[0].data):
-            eData = eComp[0].data.data
-            eMask = eComp[0].data.mask
-            hasMask[1] = True
-        else:
-            eData = eComp[0].data
-            eMask = [True] * len(eData)
-
-        print(az_angle_rad, az)
-
-        if True in hasMask:
-            radial_comp_data = np.ma.array(np.add(nData * np.cos(az), eData * np.sin(az_angle_rad)), mask=list(map(operator.and_, nMask, eMask)))
-        else:
-            radial_comp_data = np.add(nData * np.cos(az), eData * np.sin(az))
-        #From hvsrpy
-        # horizontal = self.ns._amp * math.cos(az_rad) + self.ew._amp*math.sin(az_rad)
-        
-        radial_trace = obspy.Trace(data=radial_comp_data, header=statsDict)
-        hvsr_data['stream'].append(radial_trace)
-    
-    return hvsr_data
-
- -
-def batch_data_read(input_data, batch_type='table', param_col=None, batch_params=None, verbose=False, **readcsv_getMeta_fetch_kwargs) -
-
-

Function to read data in data as a batch of multiple data files. This is best used through sprit.fetch_data(args, source='batch', *other_kwargs).

-

Parameters

-
-
input_data : filepath or list
-
Input data information for how to read in data as batch
-
batch_type : str, optional
-
Type of batch read, only 'table' and 'filelist' accepted. If 'table', will read data from a file read in using pandas.read_csv(), by default 'table'
-
param_col : None or str, optional
-
Name of parameter column from batch information file. Only used if a batch_type='table' and single parameter column is used, rather than one column per parameter (for single parameter column, parameters are formatted with = between keys/values and , between item pairs), by default None
-
batch_params : list, dict, or None, default = None
-
Parameters to be used if batch_type='filelist'. If it is a list, needs to be the same length as input_data. If it is a dict, will be applied to all files in input_data and will combined with extra keyword arguments caught by **readcsv_getMeta_fetch_kwargs.
-
verbose : bool, optional
-
Whether to print information to terminal during batch read, by default False
-
**readcsv_getMeta_fetch_kwargs
-
Keyword arguments that will be read into pandas.read_csv(), sprit.input_params, sprit.get_metadata(), and/or sprit.fetch_data()
-
-

Returns

-
-
dict
-
Dictionary with each item representing a different file read in, and which consists of its own parameter dictionary to be used by the rest of the processing steps
-
-

Raises

-
-
IndexError
-
description
-
-
- -Expand source code - -
def batch_data_read(input_data, batch_type='table', param_col=None, batch_params=None, verbose=False, **readcsv_getMeta_fetch_kwargs):
-    """Function to read data in data as a batch of multiple data files. This is best used through sprit.fetch_data(*args, source='batch', **other_kwargs).
-
-    Parameters
-    ----------
-    input_data : filepath or list
-        Input data information for how to read in data as batch
-    batch_type : str, optional
-        Type of batch read, only 'table' and 'filelist' accepted. If 'table', will read data from a file read in using pandas.read_csv(), by default 'table'
-    param_col : None or str, optional
-        Name of parameter column from batch information file. Only used if a batch_type='table' and single parameter column is used, rather than one column per parameter (for single parameter column, parameters are formatted with = between keys/values and , between item pairs), by default None
-    batch_params : list, dict, or None, default = None
-        Parameters to be used if batch_type='filelist'. If it is a list, needs to be the same length as input_data. If it is a dict, will be applied to all files in input_data and will combined with extra keyword arguments caught by **readcsv_getMeta_fetch_kwargs.
-    verbose : bool, optional
-        Whether to print information to terminal during batch read, by default False
-    **readcsv_getMeta_fetch_kwargs
-        Keyword arguments that will be read into pandas.read_csv(), sprit.input_params, sprit.get_metadata(), and/or sprit.fetch_data()
-
-    Returns
-    -------
-    dict
-        Dictionary with each item representing a different file read in, and which consists of its own parameter dictionary to be used by the rest of the processing steps
-
-    Raises
-    ------
-    IndexError
-        _description_
-    """
-    #First figure out columns
-    input_params_params = input_params.__code__.co_varnames
-    get_metadata_params = get_metadata.__code__.co_varnames
-    fetch_data_params = fetch_data.__code__.co_varnames
-
-    if batch_type=='sample':
-        sample_data=True
-        batch_type='table'
-    else:
-        sample_data = False
-    # Dictionary to store the stream objects
-    stream_dict = {}
-    data_dict = {}
-    if batch_type == 'table':
-        if isinstance(input_data, pd.DataFrame):
-            dataReadInfoDF = input_data
-        elif isinstance(input_data, dict):
-            #For params input
-            pass
-        else:#Read csv
-            read_csv_kwargs = {k: v for k, v in locals()['readcsv_getMeta_fetch_kwargs'].items() if k in pd.read_csv.__code__.co_varnames}
-            dataReadInfoDF = pd.read_csv(input_data, **read_csv_kwargs)
-            if 'datapath' in dataReadInfoDF.columns:
-                filelist = list(dataReadInfoDF['datapath'])
-            #dataReadInfoDF = dataReadInfoDF.replace(np.nan, None)
-
-        #If this is sample data, we need to create absolute paths to the filepaths
-        if sample_data:
-            sample_data_dir = pathlib.Path(pkg_resources.resource_filename(__name__, 'resources/sample_data/'))
-            for index, row in dataReadInfoDF.iterrows():
-                dataReadInfoDF.loc[index, 'datapath'] = sample_data_dir.joinpath(row.loc['datapath'])
-
-        default_dict = {'site':'HVSR Site',
-                    'network':'AM', 
-                    'station':'RAC84', 
-                    'loc':'00', 
-                    'channels':['EHZ', 'EHN', 'EHE'],
-                    'acq_date':str(datetime.datetime.now().date()),
-                    'starttime' : '00:00:00.00',
-                    'endtime' : '23:59:59.999',
-                    'tzone' : 'UTC',
-                    'xcoord' : -88.2290526,
-                    'ycoord' :  40.1012122,
-                    'elevation' : 755,
-                    'input_crs':'EPSG:4326',#4269 is NAD83, defaulting to WGS
-                    'output_crs':'EPSG:4326',
-                    'elev_unit' : 'feet',
-                    'depth' : 0,
-                    'instrument' : 'Raspberry Shake',
-                    'metapath' : '',
-                    'hvsr_band' : [1, 40],
-                    'write_path':'',
-                    'source':'file', 
-                    'export_format':'mseed', 
-                    'detrend':'spline', 
-                    'detrend_order':2, 
-                    'verbose':False}
-
-        print(f"\t{dataReadInfoDF.shape[0]} sites found: {list(dataReadInfoDF['site'])}")
-        if verbose:
-            maxLength = 25
-            maxColWidth = 12
-            if dataReadInfoDF.shape[0] > maxLength:
-                print(f'\t Showing information for first {maxLength} files only:')
-            print()
-            #Print nicely formatted df
-            #Print column names
-            print('\t', end='')
-            for col in dataReadInfoDF.columns:
-                print(str(col)[:maxColWidth].ljust(maxColWidth), end='  ')
-            print('\n\t', end='')
-
-            #Print separator
-            tableLen = (maxColWidth+2)*len(dataReadInfoDF.columns)
-            for r in range(tableLen):
-                print('-', end='')
-            print()
-
-            #Print columns/rows
-            for index, row in dataReadInfoDF.iterrows():
-                print('\t', end='')
-                for col in row:
-                    if len(str(col)) > maxColWidth:
-                        print((str(col)[:maxColWidth-3]+'...').ljust(maxColWidth), end='  ')
-                    else:
-                        print(str(col)[:maxColWidth].ljust(maxColWidth), end='  ')
-                print()
-            if dataReadInfoDF.shape[0] > maxLength:
-                endline = f'\t...{dataReadInfoDF.shape[0]-maxLength} more rows in file.\n'
-            else:
-                endline = '\n'
-            print(endline)
-
-            print('Fetching the following files:')
-        param_dict_list = []
-        verboseStatement = []
-        if param_col is None: #Not a single parameter column, each col=parameter
-            for row_ind in range(dataReadInfoDF.shape[0]):
-                param_dict = {}
-                verboseStatement.append([])
-                for col in dataReadInfoDF.columns:
-                    if col in input_params_params or col in get_metadata_params or col in fetch_data_params:
-                        currParam = dataReadInfoDF.loc[row_ind, col]
-                        if pd.isna(currParam) or currParam == 'nan':
-                            if col in default_dict.keys():
-                                param_dict[col] = default_dict[col] #Get default value
-                                if verbose:
-                                    if type(default_dict[col]) is str:
-                                        verboseStatement[row_ind].append("\t\t'{}' parameter not specified in batch file. Using {}='{}'".format(col, col, default_dict[col]))
-                                    else:
-                                        verboseStatement[row_ind].append("\t\t'{}' parameter not specified in batch file. Using {}={}".format(col, col, default_dict[col]))
-                            else:
-                                param_dict[col] = None
-                        else:
-                            param_dict[col] = dataReadInfoDF.loc[row_ind, col]
-                param_dict_list.append(param_dict)
-        else:
-            if param_col not in dataReadInfoDF.columns:
-                raise IndexError('{} is not a column in {} (columns are: {})'.format(param_col, input_data, dataReadInfoDF.columns))
-            for row in dataReadInfoDF[param_col]:
-                param_dict = {}
-                splitRow = str(row).split(',')
-                for item in splitRow:
-                    param_dict[item.split('=')[0]] = item.split('=')[1]
-                param_dict_list.append(param_dict)
-        #input_params(datapath,site,network,station,loc,channels, acq_date,starttime, endtime, tzone, xcoord, ycoord, elevation, depth, instrument, metapath, hvsr_band)
-        #fetch_data(params, inv, source, trim_dir, export_format, detrend, detrend_order, verbose)
-        #get_metadata(params, write_path)
-    elif batch_type == 'filelist':
-        if isinstance(batch_params, list):
-            if len(batch_params) != len(input_data):
-                raise RuntimeError('If batch_params is list, it must be the same length as input_data. len(batch_params)={} != len(input_data)={}'.format(len(batch_params), len(input_data)))
-            param_dict_list = batch_params
-        elif isinstance(batch_params, dict):
-            batch_params.update(readcsv_getMeta_fetch_kwargs)
-            param_dict_list = []
-            for i in range(len(input_data)):
-                param_dict_list.append(batch_params)
-        
-        # Read and process each MiniSEED file
-        for i, file in enumerate(input_data):
-            if isinstance(file, obspy.core.stream.Stream):
-                warnings.warn('Reading in a list of Obspy streams is not currently supported, but may be implemented in the future', FutureWarning)
-                pass 
-            else:
-                param_dict_list[i]['datapath'] = file
-
-    hvsr_metaDict = {}
-    zfillDigs = len(str(len(param_dict_list))) #Get number of digits of length of param_dict_list
-    i=0
-    for i, param_dict in enumerate(param_dict_list):
-        # Read the data file into a Stream object
-        input_params_kwargs = {k: v for k, v in locals()['readcsv_getMeta_fetch_kwargs'].items() if k in input_params.__code__.co_varnames}
-        input_params_kwargs2 = {k: v for k, v in param_dict.items() if k in input_params.__code__.co_varnames}
-        input_params_kwargs.update(input_params_kwargs2)
-
-        params = input_params(**input_params_kwargs)
-
-        fetch_data_kwargs = {k: v for k, v in locals()['readcsv_getMeta_fetch_kwargs'].items() if k in fetch_data.__code__.co_varnames}
-        fetch_data_kwargs2 = {k: v for k, v in param_dict.items() if k in fetch_data.__code__.co_varnames[0:7]}
-        fetch_data_kwargs.update(fetch_data_kwargs2)
-        
-        try:
-            params = fetch_data(params=params, **fetch_data_kwargs)
-        except:
-            params['ProcessingStatus']['FetchDataStatus']=False
-            params['ProcessingStatus']['OverallStatus'] = False            
-        
-        if verbose and params['ProcessingStatus']['FetchDataStatus']:
-            print("\t  {}".format(params['site']))
-            if verboseStatement !=[]:
-                for item in verboseStatement[i]:
-                    print(item)
-        elif verbose and not params['ProcessingStatus']['FetchDataStatus']:
-            print("\t  {} not read correctly. Processing will not be carried out.".format(params['site']))
-                
-        params['batch'] = True
-
-        if params['site'] == default_dict['site']: #If site was not designated
-            params['site'] = "{}_{}".format(params['site'], str(i).zfill(zfillDigs))
-            i+=1
-        hvsr_metaDict[params['site']] = params
-
-    hvsr_metaDict = HVSRBatch(hvsr_metaDict)
-
-    return hvsr_metaDict
-
def catch_errors(func)
-
- -Expand source code - -
def catch_errors(func):
-    #Define a local function to get a list of warnings that we'll use in the output
-    def get_warning_msg_list(w):
-        messageList = []
-        #Collect warnings that happened before we got to the error
-        if w:
-            hasWarnings = True
-            for wi in w:
-                warning_category = type(wi.message).__name__.title().replace('warning','Warning')
-                #if w.line is None:
-                #    w.line = linecache.getline(wi.filename, wi.lineno)
-                warning_lineNo = wi.lineno
-                warning_message = str(wi.message)
-                # append the warning category and message to messageList so we get all warnings
-                messageList.append(f'{warning_category} ({warning_lineNo}): {warning_message}')
-        return messageList
-    
-    # use functools.wraps to preserve the original function's metadata
-    @functools.wraps(func)
-    def wrapper(*args, **kwargs):
-        result = None
-        # use the global keyword to access the error_message and error_category variables
-        global error_message
-        global error_category
-
-        messageList = []
-        hasWarnings = False
-        # use a try-except block to catch any exceptions
-        #result = func(*args, **kwargs)
-        try:
-            # use a context manager to catch any warnings
-            with warnings.catch_warnings(record=True) as w:
-                warnings.simplefilter('always')
-                # call the original function with the given arguments
-                result = func(*args, **kwargs)
-                
-                #Get message list, [] if no messages, doesn't run at all if Error/exception in func
-                messageList = get_warning_msg_list(w)
-                if messageList == []:
-                    return result
-                else:
-                    warningMessage = "WARNING:"
-                    for msg in messageList:
-                        warningMessage = "\n {}".format(msg)
-
-                    messagebox.showwarning(title='WARNINGS', message=warningMessage)
-                    
-        except Exception as e:
-            messageList = get_warning_msg_list(w)
-            errorObj = sys.exc_info()[2]
-
-            mainErrText = sys.exc_info()[1]
-
-            mainErrTb = traceback.extract_tb(sys.exc_info()[2])[-1]
-            mainErrFilePath = pathlib.Path(mainErrTb[0])
-            
-            mainErrFileName = mainErrFilePath.stem
-            mainErrLineNo = mainErrTb[1]
-            mainErrFunc = mainErrTb[2]
-            mainErrCodeLine = mainErrTb[3]
-
-            errLineNo1 = str(traceback.extract_tb(sys.exc_info()[2])[-1].lineno)
-            error_category = type(e).__name__.title().replace('error', 'Error')
-            error_message = f"{e} ({errLineNo1})"
-            
-            #Get message list, [] if no messages, doesn't run at all if Error/exception in func
-            warningMessageList = get_warning_msg_list(w)
-
-            #Build error messages
-            tbTuple0 = sys.exc_info()[0]
-            tbTuple1 = sys.exc_info()[1]
-            tbTuple2 = traceback.extract_tb(sys.exc_info()[2])
-            
-            logMsg = f"**ERROR**\n{tbTuple0.__name__}: {tbTuple1}"
-            dialogErrMsg = logMsg.split(':')[1]
-            for tb in tbTuple2:
-                logMsg = logMsg + '\n\t'
-                logMsg = logMsg + f"{pathlib.Path(tb[0]).stem}.{tb[2]}(): {tb[3]} (Line {tb[1]})"
-                dialogErrMsg = dialogErrMsg + f"\n{pathlib.Path(tb[0]).stem}.{tb[2]}(), Line {tb[1]}"
-            logMsg = logMsg + '\n\n'
-
-            #fullErrorMessage = f'ERROR {mainErrFileName}.{mainErrFunc} ({mainErrLineNo}): {mainErrText} \n\n {mainErrFileName} Line {mainErrLineNo}: {mainErrCodeLine}.'
-            if messageList == []:
-                pass
-            else:
-                dialogErrMsg = dialogErrMsg+"\n\n  Additional Warnings along the way. See Log for more information."
-                logMsg = logMsg + "\n\n\t  *WARNING(S)*\n\tAdditional Warnings along the way:"
-                for addMsg in warningMessageList:
-                    logMsg = logMsg+"\n\t\t{}".format(addMsg)
-
-
-            SPRIT_App.log_errorMsg(spritApp, logMsg)
-
-            messagebox.showerror(title=f'ERROR ({error_category})',
-                                    message=dialogErrMsg)
-            update_progress_bars(100)
-
-        # return the result of the function or the error/warning messages and categories
-        return result
-    # return the wrapper function
-    return wrapper
-
def check_gui_requirements()
-
- -Expand source code - -
def check_gui_requirements():
-    #First, check requirements
-    # Define a command that tries to open a window
-    command = "python -c \"import tkinter; tkinter.Tk()\""
-
-    # Run the command and get the exit code
-    exit_code = os.system(command)
-    
-    # Check if tkinter gui could be created
-    if exit_code == 0:
-        #Tkinter 
-        oktoproceed=True
-    else:
-        oktoproceed=False
-        print("GUI window cannot be created.")
-
-    return oktoproceed
-
-    #if sys.platform == 'linux':
-    #    # Check if qtwayland5 is installed
-    #    output = subprocess.run(["dpkg", "-s", "qtwayland5"], capture_output=True, text=True)
-    #    if "Status: install ok installed" in output.stdout:
-    #        print("qtwayland5 is already installed")
-    #    else:
-    #        print("qtwayland5 is not installed")
-    #        # Install qtwayland5
-    #        os.system("sudo apt install qtwayland5")
-
def check_mark(incolor=False, interminal=False) @@ -721,28 +154,9 @@

Raises

The default Windows terminal is not able to display the check mark character correctly. This function returns another displayable character if platform is Windows

-
- -Expand source code - -
def check_mark(incolor=False, interminal=False):
-    """The default Windows terminal is not able to display the check mark character correctly.
-       This function returns another displayable character if platform is Windows"""
-    if incolor:
-        try:
-            check = get_char(u'\u2705')
-        except:
-            check = get_char(u'\u2714')
-    else:
-        check = get_char(u'\u2714')
-
-    if sys.platform=='win32' and interminal:
-        check = get_char(u'\u039E')
-    return check
-
-def check_peaks(hvsr_data, hvsr_band=[1, 40], peak_selection='max', peak_freq_range=[1, 20], verbose=False) +def check_peaks(hvsr_data, hvsr_band=[0.4, 40], peak_selection='max', peak_freq_range=[0.4, 40], azimuth='HV', verbose=False)

Function to run tests on HVSR peaks to find the best one and see if it passes quality checks

@@ -750,13 +164,13 @@

Parameters

hvsr_data : dict
Dictionary containing all the calculated information about the HVSR data (i.e., hvsr_out returned from process_hvsr)
-
hvsr_band : tuple or list, default=[1, 40]
+
hvsr_band : tuple or list, default=[0.4, 40]
2-item tuple or list with lower and upper limit of frequencies to analyze
peak_selection : str or numeric, default='max'
How to select the "best" peak used in the analysis. For peak_selection="max" (default value), the highest peak within peak_freq_range is used. For peak_selection='scored', an algorithm is used to select the peak based in part on which peak passes the most SESAME criteria. If a numeric value is used (e.g., int or float), this should be a frequency value to manually select as the peak of interest.
-
peak_freq_range : tuple or list, default=[1, 20];
+
peak_freq_range : tuple or list, default=[0.4, 40];
The frequency range within which to check for peaks. If there is an HVSR curve with multiple peaks, this allows the full range of data to be processed while limiting peak picks to likely range.
verbose : bool, default=False
Whether to print results and inputs to terminal.
@@ -767,259 +181,21 @@

Returns

hvsr_data : HVSRData or HVSRBatch object
Object containing previous input data, plus information about peak tests
-
- -Expand source code - -
def check_peaks(hvsr_data, hvsr_band=[1, 40], peak_selection='max', peak_freq_range=[1, 20], verbose=False):
-    """Function to run tests on HVSR peaks to find best one and see if it passes quality checks
-
-        Parameters
-        ----------
-        hvsr_data : dict
-            Dictionary containing all the calculated information about the HVSR data (i.e., hvsr_out returned from process_hvsr)
-        hvsr_band : tuple or list, default=[1, 40]
-            2-item tuple or list with lower and upper limit of frequencies to analyze
-        peak_selection : str or numeric, default='max'
-            How to select the "best" peak used in the analysis. For peak_selection="max" (default value), the highest peak within peak_freq_range is used.
-            For peak_selection='scored', an algorithm is used to select the peak based in part on which peak passes the most SESAME criteria.
-            If a numeric value is used (e.g., int or float), this should be a frequency value to manually select as the peak of interest.
-        peak_freq_range : tuple or list, default=[1, 20];
-            The frequency range within which to check for peaks. If there is an HVSR curve with multiple peaks, this allows the full range of data to be processed while limiting peak picks to likely range.
-        verbose : bool, default=False
-            Whether to print results and inputs to terminal.
-        
-        Returns
-        -------
-        hvsr_data   : HVSRData or HVSRBatch object
-            Object containing previous input data, plus information about peak tests
-    """
-    orig_args = locals().copy() #Get the initial arguments
-
-    # Update with processing parameters specified previously in input_params, if applicable
-    if 'processing_parameters' in hvsr_data.keys():
-        if 'check_peaks' in hvsr_data['processing_parameters'].keys():
-            for k, v in hvsr_data['processing_parameters']['check_peaks'].items():
-                defaultVDict = dict(zip(inspect.getfullargspec(check_peaks).args[1:], 
-                                        inspect.getfullargspec(check_peaks).defaults))
-                # Manual input to function overrides the imported parameter values
-                if (not isinstance(v, (HVSRData, HVSRBatch))) and (k in orig_args.keys()) and (orig_args[k]==defaultVDict[k]):
-                    orig_args[k] = v
-
-    hvsr_band = orig_args['hvsr_band']
-    peak_selection = orig_args['peak_selection']
-    peak_freq_range = orig_args['peak_freq_range']
-    verbose = orig_args['verbose']
-
-    if (verbose and 'input_params' not in hvsr_data.keys()) or (verbose and not hvsr_data['batch']):
-        if isinstance(hvsr_data, HVSRData) and hvsr_data['batch']:
-            pass
-        else:
-            print('\nChecking peaks in the H/V Curve (check_peaks())')
-            print('\tUsing the following parameters:')
-            for key, value in orig_args.items():
-                if key=='hvsr_data':
-                    pass
-                else:
-                    print('\t  {}={}'.format(key, value))
-            print()
-  
-    #First, divide up for batch or not
-    if isinstance(hvsr_data, HVSRBatch):
-        if verbose:
-            print('\t  Running in batch mode')
-        #If running batch, we'll loop through each site
-        for site_name in hvsr_data.keys():
-            args = orig_args.copy() #Make a copy so we don't accidentally overwrite
-            args['hvsr_data'] =  hvsr_data[site_name] #Get what would normally be the "params" variable for each site
-            if hvsr_data[site_name]['ProcessingStatus']['OverallStatus']:
-                try:
-                    hvsr_data[site_name] = _check_peaks_batch(**args) #Call another function, that lets us run this function again
-                except:
-                    if verbose:
-                        print(f"\t{site_name}: check_peaks() unsuccessful. Peaks not checked.")
-                    else:
-                        warnings.warn(f"\t{site_name}: check_peaks() unsuccessful. Peaks not checked.", RuntimeWarning)
-                
-        hvsr_data = HVSRBatch(hvsr_data)
-    else:
-        if hvsr_data['ProcessingStatus']['OverallStatus']:
-            if not hvsr_band:
-                hvsr_band = [1,40]
-            
-            hvsr_data['hvsr_band'] = hvsr_band
-
-            anyK = list(hvsr_data['x_freqs'].keys())[0]
-
-            x = hvsr_data['x_freqs'][anyK] #Consistent for all curves
-            y = hvsr_data['hvsr_curve'] #Calculated based on "Use" column
-
-            scorelist = ['score', 'scored', 'best', 's']
-            maxlist = ['max', 'highest', 'm']
-            # Convert peak_selection to numeric, get index of nearest value as list item for __init_peaks()
-            try:
-                peak_val = float(peak_selection)
-                index_list = [np.argmin(np.abs(x - peak_val))]        
-            except:
-                # If score method is being used, get index list for __init_peaks()
-                if peak_selection in scorelist:
-                    index_list = hvsr_data['hvsr_peak_indices'] #Calculated based on hvsr_curve
-                elif peak_selection in maxlist:
-                    #Get max index as item in list for __init_peaks()
-                    startInd = np.argmin(np.abs(x - peak_freq_range[0]))
-                    endInd = np.argmin(np.abs(x - peak_freq_range[1]))
-                    if startInd > endInd:
-                        holder = startInd
-                        startInd = endInd
-                        endInd = holder
-                    subArrayMax = np.argmax(y[startInd:endInd])
-
-                    # If max val is in subarray, this will be the same as the max of curve
-                    # Otherwise, it will be the index of the value that is max within peak_freq_range
-                    index_list = [subArrayMax+startInd]
-            
-            hvsrp = hvsr_data['hvsrp'] #Calculated based on "Use" column
-            hvsrm = hvsr_data['hvsrm'] #Calculated based on "Use" column
-
-            hvsrPeaks = hvsr_data['hvsr_df'][hvsr_data['hvsr_df']['Use']]['CurvesPeakIndices']
-            #hvsrPeaks = hvsr_data['ind_hvsr_peak_indices'] #Original calculation
-
-            hvsr_log_std = hvsr_data['hvsr_log_std']
-            peak_freq_range = hvsr_data['peak_freq_range']
-
-            #Do for hvsr
-            peak = __init_peaks(x, y, index_list, hvsr_band, peak_freq_range)
-
-            peak = __check_curve_reliability(hvsr_data, peak)
-            peak = __check_clarity(x, y, peak, do_rank=True)
-
-            #Do for hvsrp
-            # Find  the relative extrema of hvsrp (hvsr + 1 standard deviation)
-            if not np.isnan(np.sum(hvsrp)):
-                index_p = __find_peaks(hvsrp)
-            else:
-                index_p = list()
-
-            peakp = __init_peaks(x, hvsrp, index_p, hvsr_band, peak_freq_range)
-            peakp = __check_clarity(x, hvsrp, peakp, do_rank=True)
-
-            # Do for hvsrm
-            # Find  the relative extrema of hvsrm (hvsr - 1 standard deviation)
-            if not np.isnan(np.sum(hvsrm)):
-                index_m = __find_peaks(hvsrm)
-            else:
-                index_m = list()
-
-            peakm = __init_peaks(x, hvsrm, index_m, hvsr_band, peak_freq_range)
-            peakm = __check_clarity(x, hvsrm, peakm, do_rank=True)
-
-            # Get standard deviation of time peaks
-            stdf = __get_stdf(x, index_list, hvsrPeaks)
-
-            peak = __check_freq_stability(peak, peakm, peakp)
-            peak = __check_stability(stdf, peak, hvsr_log_std, rank=True)
-
-            hvsr_data['PeakReport'] = peak
-
-            #Iterate through peaks and 
-            #   Get the BestPeak based on the peak score
-            #   Calculate whether each peak passes enough tests
-            curveTests = ['WindowLengthFreq.','SignificantCycles', 'LowCurveStDevOverTime']
-            peakTests = ['PeakProminenceBelow', 'PeakProminenceAbove', 'PeakAmpClarity', 'FreqStability', 'PeakStability_FreqStD', 'PeakStability_AmpStD']
-            bestPeakScore = 0
-
-            for p in hvsr_data['PeakReport']:
-                #Get BestPeak
-                if p['Score'] > bestPeakScore:
-                    bestPeakScore = p['Score']
-                    bestPeak = p
-
-                #Calculate if peak passes criteria
-                cTestsPass = 0
-                pTestsPass = 0
-                for testName in p['PassList'].keys():
-                    if testName in curveTests:
-                        if p['PassList'][testName]:
-                            cTestsPass += 1
-                    elif testName in peakTests:
-                        if p['PassList'][testName]:
-                            pTestsPass += 1
-
-                if cTestsPass == 3 and pTestsPass >= 5:
-                    p['PeakPasses'] = True
-                else:
-                    p['PeakPasses'] = False
-                
-            #Designate BestPeak in output dict
-            if len(hvsr_data['PeakReport']) == 0:
-                bestPeak={}
-                print(f"No Best Peak identified for {hvsr_data['site']}")
-
-            hvsr_data['BestPeak'] = bestPeak
-        else:
-            hvsr_data['BestPeak'] = {}
-            print(f"Processing Errors: No Best Peak identified for {hvsr_data['site']}")
-            try:
-                hvsr_data.plot()
-            except:
-                pass
-
-        hvsr_data['processing_parameters']['check_peaks'] = {}
-        for key, value in orig_args.items():
-            hvsr_data['processing_parameters']['check_peaks'][key] = value
-
-
-    return hvsr_data
-
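A minimal, self-contained sketch of the pass-counting rule above (all three curve-reliability tests and at least five of the six peak tests must pass, a SESAME-style criterion), using a hypothetical PassList:

# Hypothetical PassList contents illustrating the acceptance rule
curve_tests = ['WindowLengthFreq.', 'SignificantCycles', 'LowCurveStDevOverTime']
peak_tests = ['PeakProminenceBelow', 'PeakProminenceAbove', 'PeakAmpClarity',
              'FreqStability', 'PeakStability_FreqStD', 'PeakStability_AmpStD']
pass_list = {name: True for name in curve_tests + peak_tests}
pass_list['PeakAmpClarity'] = False  # one failing peak test is still acceptable

c_tests_pass = sum(pass_list[t] for t in curve_tests)
p_tests_pass = sum(pass_list[t] for t in peak_tests)
print(c_tests_pass == 3 and p_tests_pass >= 5)  # True: the peak passes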
def check_tsteps(hvsr_data)

Check time steps of PPSDs to make sure they are all the same length

def check_tsteps(hvsr_data):
-    """Check time steps of PPSDs to make sure they are all the same length"""
-    ppsds = hvsr_data['ppsds']
-    tSteps = []
-    for k in ppsds.keys():
-        tSteps.append(np.array(ppsds[k]['psd_values']).shape[0])
-    if len(set(tSteps)) <= 1:
-        # All channels have the same number of time steps
-        minTStep = tSteps[0]
-    else:
-        print('Different numbers of time steps were used to calculate the HVSR curves across channels.\nThis may result in computational errors. Trimming to the shortest.')
-        minTStep = min(tSteps)
-    return minTStep
-
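For reference, a minimal sketch of the shape check that check_tsteps performs, with a hypothetical ppsds dict (rows are time windows):

import numpy as np

ppsds = {'Z': {'psd_values': np.zeros((40, 100))},
         'E': {'psd_values': np.zeros((38, 100))},
         'N': {'psd_values': np.zeros((40, 100))}}
t_steps = [np.array(ppsds[k]['psd_values']).shape[0] for k in ppsds]
min_t_step = min(t_steps)  # 38; longer channels are trimmed to this length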
def check_xvalues(ppsds)

Check x_values of PPSDs to make sure they are all the same length

def check_xvalues(ppsds):
-    """Check x_values of PPSDs to make sure they are all the same length"""
-    xLengths = []
-    for k in ppsds.keys():
-        xLengths.append(len(ppsds[k]['period_bin_centers']))
-    if len(set(xLengths)) <= 1:
-        pass  # All channels have the same number of period_bin_centers
-    else:
-        print('X-values (periods or frequencies) do not have the same values across channels.\nThis may result in computational errors.')
-        # TODO: resolve mismatched x-values rather than only warning
-    return ppsds
-
-def checkifpath(filepath, sample_list='', verbose=False)
+def checkifpath(filepath, sample_list='', verbose=False, raise_error=False)

Support function to check whether a filepath is a pathlib.Path object, converting it if not

@@ -1033,2287 +209,15 @@

Returns

filepath : pathlib.Path
pathlib.Path of filepath
def checkifpath(filepath, sample_list='', verbose=False):
-    """Support function to check whether a filepath is a pathlib.Path object, converting it if not
-
-    Parameters
-    ----------
-    filepath : str or pathlib.Path
-        Filepath to check. If it is not a valid filepath, it is not converted and an error is raised.
-
-    Returns
-    -------
-    filepath : pathlib.Path
-        pathlib.Path of filepath
-    """
-    if sample_list=='':
-        sample_list = ['1', '2', '3', '4', '5', '6', 'sample', 'batch', 'sample_batch']
-        for s in range(1, 7):
-            sample_list.append(f"sample{s}")
-            sample_list.append(f"sample_{s}")
-
-    # checks if the variable is any instance of pathlib
-    if isinstance(filepath, pathlib.PurePath):
-        pass
-    elif str(filepath) in sample_list:
-        pass
-    else:
-        try:
-            filepath = pathlib.Path(filepath)
-        except Exception:
-            if verbose:
-                warnings.warn('Filepath cannot be converted to pathlib path: {}'.format(filepath))
-            raise
-        if not filepath.exists():
-            raise RuntimeError('File does not exist: {}'.format(filepath))
-    return filepath
-
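The core conversion pattern in checkifpath, reduced to a standalone sketch (the path is hypothetical):

import pathlib

filepath = 'data/site1.mseed'  # hypothetical input
if not isinstance(filepath, pathlib.PurePath):
    filepath = pathlib.Path(filepath)
print(filepath, filepath.exists())  # checkifpath raises RuntimeError when False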
def create_jupyter_ui()
def create_jupyter_ui():
-    global hvsr_data
-
-    ui_width = 20
-    ui_height= 12
-    global results_fig
-    global log_textArea
-    log_textArea = widgets.Textarea(value="SESSION LOG", disabled=True, layout={'height': '300px','width': '99%', 'overflow': 'scroll'})
-
-    # INPUT TAB
-    # Create a VBox for the accordions
-    input_HBox = widgets.HBox()
-    input_accordion_label_box = widgets.VBox()
-    input_accordion_box = widgets.VBox()
-    input_accordion = widgets.Accordion()
-
-    # Metadata accordion
-    metadata_grid = widgets.GridspecLayout(7, 10)
-    network_textbox = widgets.Text(description='Network:',
-                                    placeholder=get_default(sprit_hvsr.input_params, 'network'),
-                                    value=get_default(sprit_hvsr.input_params, 'network'),
-                                    tooltip="input_params(network)")
-
-    station_textbox = widgets.Text(description='Station:',
-                                    placeholder=get_default(sprit_hvsr.input_params, 'station'),
-                                    value=get_default(sprit_hvsr.input_params, 'station'))
-
-    location_textbox = widgets.Text(description='Location:',
-                                    placeholder=get_default(sprit_hvsr.input_params, 'loc'),
-                                    value=get_default(sprit_hvsr.input_params, 'loc'))
-
-    z_channel_textbox = widgets.Text(description='Z Channel:',
-                                    placeholder=get_default(sprit_hvsr.input_params, 'channels')[0],
-                                    value=get_default(sprit_hvsr.input_params, 'channels')[0])
-
-    e_channel_textbox = widgets.Text(description='E Channel:',
-                                    placeholder=get_default(sprit_hvsr.input_params, 'channels')[2],
-                                    value=get_default(sprit_hvsr.input_params, 'channels')[2])
-
-    n_channel_textbox = widgets.Text(description='N Channel:',
-                                    placeholder=get_default(sprit_hvsr.input_params, 'channels')[1],
-                                    value=get_default(sprit_hvsr.input_params, 'channels')[1])
-
-
-    # Instrument Settings
-    inst_settings_text = widgets.Text(placeholder='Instrument Settings Filepath', layout=widgets.Layout(width='55%'))
-    instrument_read_button = widgets.Button(icon='fa-file-import',button_style='success',
-                                            layout=widgets.Layout(width='4%'))
-    instrument_settings_button = widgets.Button(description='Select .inst file',
-                                            layout=widgets.Layout(width='10%'))
-    inst_settings_hbox = widgets.HBox([inst_settings_text,instrument_read_button, instrument_settings_button])
-    
-    def select_inst(event):
-        try:
-            if event.description == 'Select .inst file':
-                root = tk.Tk()
-                root.wm_attributes('-topmost', True)
-                root.withdraw()
-                inst_files = filedialog.askopenfilenames(defaultextension='.inst', filetypes=[('Inst', '.inst')],
-                                                                    title="Select Instrument Settings File")
-                if isinstance(inst_files, tuple):
-                    pass
-                else:
-                    inst_files = tuple(inst_files)
-                root.destroy()
-            else:
-                inst_files = tuple([inst_settings_text.value])
-
-            for i, inst_f in enumerate(inst_files):
-                inst_settings_text.value = pathlib.Path(inst_f).as_posix()
-                inst_settings = sprit_hvsr.import_settings(settings_import_path=pathlib.Path(inst_f).as_posix(), settings_import_type='instrument')
-                
-                # Go through all items and add them
-                if 'instrument' in inst_settings.keys():
-                    if inst_settings['instrument'] not in instrument_dropdown.options:
-                        instrument_dropdown.options.append(inst_settings['instrument'])
-                    instrument_dropdown.value = inst_settings['instrument']
-                
-                if 'net' in inst_settings.keys():
-                    network_textbox.value = inst_settings['net']
-
-                if 'sta' in inst_settings.keys():
-                    station_textbox.value = inst_settings['sta']
-
-                if 'loc' in inst_settings.keys():
-                    location_textbox.value = inst_settings['loc']
-
-                if 'cha' in inst_settings.keys():
-                    for c in inst_settings['cha']:
-                        if c.lower()[2]=='z':
-                            z_channel_textbox.value = c
-                        if c.lower()[2]=='e':
-                            e_channel_textbox.value = c
-                        if c.lower()[2] =='n':
-                            n_channel_textbox.value = c
-                
-                if 'metapath' in inst_settings.keys():
-                    metadata_filepath.value = inst_settings['metapath']
-
-                if 'hvsr_band' in inst_settings.keys():
-                    hvsr_band_min_box.value = inst_settings['hvsr_band'][0]
-                    hvsr_band_max_box.value = inst_settings['hvsr_band'][1]
-
-        except Exception as e:
-            print(e)
-            instrument_settings_button.disabled=True
-            instrument_settings_button.description='Use Text Field'
-    
-    instrument_settings_button.on_click(select_inst)
-    instrument_read_button.on_click(select_inst)
-
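The file-dialog pattern used by select_inst (and by select_proc below), reduced to its essentials: a hidden, always-on-top Tk root so only the dialog is visible, destroyed once a selection is made.

import tkinter as tk
from tkinter import filedialog

root = tk.Tk()
root.wm_attributes('-topmost', True)  # keep the dialog above other windows
root.withdraw()                       # hide the empty root window
files = filedialog.askopenfilenames(title="Select Instrument Settings File",
                                    filetypes=[('Inst', '.inst')])
root.destroy()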
-    metadata_grid[0,:] = inst_settings_hbox
-    metadata_grid[1,0] = network_textbox
-    metadata_grid[2,0] = station_textbox
-    metadata_grid[3,0] = location_textbox
-    metadata_grid[4,0] = z_channel_textbox
-    metadata_grid[5,0] = e_channel_textbox
-    metadata_grid[6,0] = n_channel_textbox
-
-    # Acquisition Accordion
-    instrument_grid = widgets.GridspecLayout(5, 10)
-    # Date Picker labelled "Acquisition Date"
-    acquisition_date_picker = widgets.DatePicker(description='Acq.Date:',
-                                            placeholder=datetime.datetime.today().date(),
-                                            value=datetime.datetime.today().date())
-
-    # Label that shows the Date currently selected in the Date Picker
-    acquisition_doy = widgets.IntText(description='DOY',
-                                                placeholder=f"{acquisition_date_picker.value.timetuple().tm_yday}",
-                                                value=f"{acquisition_date_picker.value.timetuple().tm_yday}",
-                                                layout=widgets.Layout(width='auto'))
-
-    def on_acq_date_change(change):
-        acquisition_doy.value = acquisition_date_picker.value.timetuple().tm_yday
-    acquisition_date_picker.observe(on_acq_date_change)
-
-    def on_doy_change(change):
-        curr_year = datetime.datetime.today().year
-        if acquisition_doy.value > datetime.datetime.today().timetuple().tm_yday:
-            curr_year -= 1
-        acquisition_date_picker.value = (datetime.datetime(curr_year, 1, 1) + datetime.timedelta(days = acquisition_doy.value-1)).date()
-    acquisition_doy.observe(on_doy_change)
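A quick check of the day-of-year round trip these two observers implement (values are hypothetical):

import datetime

doy = 306
date = (datetime.datetime(2024, 1, 1) + datetime.timedelta(days=doy - 1)).date()
assert date.timetuple().tm_yday == doy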
-
-    # Time selector (hour and minute) labelled "Start Time".
-    try:
-        start_time_picker = widgets.TimePicker(description='Start Time:',
-                                            placeholder=datetime.time(0,0,0),
-                                            value=datetime.time(0,0,0),
-                                            layout=widgets.Layout(width='auto'))
-    except Exception as e:
-        start_time_picker = widgets.Text(description='Start Time:',
-                                        placeholder='00:00',
-                                        value='00:00',
-                                        layout=widgets.Layout(width='auto'))
-
-    # Time selector (hour and minute) labelled "End Time". Same as Start Time otherwise.
-    try:
-        end_time_picker = widgets.TimePicker(description='End Time:',
-                                        placeholder=datetime.time(23,59),
-                                        value=datetime.time(23,59),
-                                        layout=widgets.Layout(width='auto'))
-    except Exception as e:
-        end_time_picker = widgets.Text(description='End Time:',
-                                        placeholder='23:59:59.999999',
-                                        value='23:59:59.999999',
-                                        layout=widgets.Layout(width='auto'))
-
-    tzlist = list(available_timezones())
-    tzlist.sort()
-    tzlist.remove('UTC')
-    tzlist.remove('US/Central')
-    tzlist.insert(0, 'US/Central')
-    tzlist.insert(0, 'UTC')
-    # A dropdown list with all the items from zoneinfo.available_timezones(), default 'UTC'
-    time_zone_dropdown = widgets.Dropdown(options=tzlist,value=get_default(sprit_hvsr.input_params, 'tzone'),
-                                            description='Time Zone:',layout=widgets.Layout(width='fill'))
-
-    instrument_grid[0,0] = acquisition_date_picker
-    instrument_grid[0,1] = acquisition_doy
-    instrument_grid[1,0] = start_time_picker
-    instrument_grid[2,0] = end_time_picker
-    instrument_grid[3,0] = time_zone_dropdown
-
-    # LOCATION ACCORDION
-    location_grid = widgets.GridspecLayout(4, 10)
-    # X coordinate input
-    xcoord_textbox = widgets.FloatText(description='X Coordinate:', tooltip='xcoord',
-                                        value=get_default(sprit_hvsr.input_params, 'xcoord'), 
-                                        placeholder=get_default(sprit_hvsr.input_params, 'xcoord'),
-                                        layout=widgets.Layout(width='auto'))
-    location_grid[0, 0] = xcoord_textbox
-
-    # Y coordinate input
-    ycoord_textbox = widgets.FloatText(description='Y Coordinate:', tooltip='ycoord',
-                                        value=get_default(sprit_hvsr.input_params, 'ycoord'), 
-                                        placeholder=get_default(sprit_hvsr.input_params, 'ycoord'),
-                                        layout=widgets.Layout(width='auto'))
-    location_grid[1, 0] = ycoord_textbox
-
-    # Z coordinate input
-    zcoord_textbox = widgets.FloatText(description='Z Coordinate:', tooltip='elevation',
-                                        value=get_default(sprit_hvsr.input_params, 'elevation'),
-                                        placeholder=get_default(sprit_hvsr.input_params, 'elevation'),                                     
-                                        layout=widgets.Layout(width='auto'))
-    location_grid[2, 0] = zcoord_textbox
-
-    # Z coordinate unit input
-    elevation_unit_textbox = widgets.Dropdown(options=[('Feet', 'feet'), ('Meters', 'meters')],
-                                                value=get_default(sprit_hvsr.input_params, 'elev_unit'),
-                                                description='Z Unit:', tooltip='elev_unit',
-                                                layout=widgets.Layout(width='auto'))
-    location_grid[2, 1] = elevation_unit_textbox
-
-    # Input CRS input
-    input_crs_textbox = widgets.Text(description='Input CRS:',
-                                        layout=widgets.Layout(width='auto'),
-                                        placeholder=get_default(sprit_hvsr.input_params, 'input_crs'),
-                                        value=get_default(sprit_hvsr.input_params, 'input_crs'))
-    location_grid[3, 0] = input_crs_textbox
-
-    # Output CRS input
-    output_crs_textbox = widgets.Text(description='Output CRS:',
-                                        layout=widgets.Layout(width='auto'),
-                                        placeholder=get_default(sprit_hvsr.input_params, 'output_crs'),
-                                        value=get_default(sprit_hvsr.input_params, 'output_crs'))
-    location_grid[3, 1] = output_crs_textbox
-
-    # IO PARAMS ACCORDION
-    ioparam_grid = widgets.GridspecLayout(6, 10)
-
-    # Data format (for obspy format to use to read in)
-    data_format_dropdown = widgets.Dropdown(
-            options=OBSPY_FORMATS,
-            value='MSEED',
-            description='Data Formats:', layout=widgets.Layout(width='auto'))
-
-    hvsr_band_min_box = widgets.FloatText(description='HVSR Band [Hz]', style={'description_width': 'initial'},
-                                          placeholder=get_default(sprit_hvsr.input_params, 'hvsr_band')[0],
-                                          value=get_default(sprit_hvsr.input_params, 'hvsr_band')[0])
-    hvsr_band_max_box = widgets.FloatText(placeholder=get_default(sprit_hvsr.input_params, 'hvsr_band')[1],
-                                          value=get_default(sprit_hvsr.input_params, 'hvsr_band')[1])
-    hvsr_band_hbox = widgets.HBox([hvsr_band_min_box, hvsr_band_max_box],layout=widgets.Layout(width='auto'))
-
-
-    peak_freq_range_min_box = widgets.FloatText(description='Peak Range [Hz]',placeholder=get_default(sprit_hvsr.input_params, 'peak_freq_range')[0], 
-                                                value=get_default(sprit_hvsr.input_params, 'peak_freq_range')[0],
-                                                style={'description_width': 'initial'}, layout=widgets.Layout(width='auto'))
-    peak_freq_range_max_box = widgets.FloatText(placeholder=get_default(sprit_hvsr.input_params, 'peak_freq_range')[1], 
-                                                value=get_default(sprit_hvsr.input_params, 'peak_freq_range')[1],layout=widgets.Layout(width='auto'))
-    peak_freq_range_hbox = widgets.HBox([peak_freq_range_min_box, peak_freq_range_max_box],layout=widgets.Layout(width='auto'))
-
-
-    # A dropdown labeled "Detrend type" with "Spline", "Polynomial", or "None"
-    detrend_type_dropdown = widgets.Dropdown(options=[('Spline', 'spline'), ('Polynomial', 'polynomial'), ('None', 'none')],
-                            description='Detrend Type:',  layout=widgets.Layout(width='auto'))
-    detrend_order = widgets.FloatText(description='Order:', tooltip='detrend_order', placeholder=get_default(sprit_hvsr.fetch_data, 'detrend_order'), 
-                                      value=get_default(sprit_hvsr.fetch_data, 'detrend_order'),layout=widgets.Layout(width='auto'))
-
-    # A text to specify the trim directory
-    trim_directory = widgets.Text(description='Trim Dir.:', value="None",#pathlib.Path().home().as_posix(),
-                                    layout=widgets.Layout(width='auto'))
-    trim_export_dropdown = widgets.Dropdown(
-                options=OBSPY_FORMATS,
-                value='MSEED',
-                description='Trim Format:', layout=widgets.Layout(width='auto'))
-    trim_directory_upload = widgets.FileUpload(
-                            accept='', 
-                            multiple=False, layout=widgets.Layout(width='auto'))
-
-    # Processing Settings
-    proc_settings_text = widgets.Text(placeholder='Processing Settings Filepath', layout=widgets.Layout(width='55%'))
-    proc_settings_read_button = widgets.Button(icon='fa-file-import',button_style='success',
-                                            layout=widgets.Layout(width='4%'))
-    proc_settings_browse_button = widgets.Button(description='Select .proc file',
-                                            layout=widgets.Layout(width='10%'))
-    proc_settings_hbox = widgets.HBox([proc_settings_text, proc_settings_read_button, proc_settings_browse_button])
-    
-    excluded_params = ['hvsr_data', 'params', 'hvsr_results']
-    funcList = [sprit_hvsr.fetch_data, sprit_hvsr.remove_noise,
-                sprit_hvsr.generate_ppsds, sprit_hvsr.process_hvsr,
-                sprit_hvsr.remove_outlier_curves, sprit_hvsr.check_peaks,
-                sprit_hvsr.get_report]
-
-    def select_proc(event):
-        try:
-            if event.description == 'Select .proc file':
-                root = tk.Tk()
-                root.wm_attributes('-topmost', True)
-                root.withdraw()
-                proc_files = filedialog.askopenfilenames(defaultextension='.proc', filetypes=[('PROC', '.proc')],
-                                                                    title="Select Processing Settings File")
-                if isinstance(proc_files, tuple):
-                    pass
-                else:
-                    proc_files = tuple(proc_files)
-                root.destroy()
-            else:
-                proc_files = tuple([proc_settings_text.value])
-
-            for i, proc_f in enumerate(proc_files):
-                proc_settings_text.value = pathlib.Path(proc_f).as_posix()
-                proc_settings = sprit_hvsr.import_settings(settings_import_path=pathlib.Path(proc_f).as_posix(), settings_import_type='processing')
-                
-                for func, params in proc_settings.items():
-                    if func in widget_param_dict.keys():
-                        for prm, val in params.items():
-                            if prm in widget_param_dict[func].keys():
-                                print(prm, ':', widget_param_dict[func][prm],' |  ', val)
-                                if val is None or val=='None':
-                                    val='none'
-                                if prm == 'export_format':
-                                    val = val.upper()
-                                if prm == 'smooth':
-                                    if val is True:
-                                        val = 51
-                                if prm == 'resample':
-                                    if val is True:
-                                        val = 1000
-                                if isinstance(widget_param_dict[func][prm], list):
-                                    for i, item in enumerate(widget_param_dict[func][prm]):
-                                        item.value = val[i]
-                                else:
-                                    widget_param_dict[func][prm].value = val
-        except Exception as e:
-            print(e)
-            proc_settings_browse_button.disabled=True
-            proc_settings_browse_button.description='Use Text Field'
-    
-    proc_settings_read_button.on_click(select_proc)
-    proc_settings_browse_button.on_click(select_proc)
-
-    ioparam_grid[0,:] = proc_settings_hbox
-    ioparam_grid[1,0] = data_format_dropdown
-    ioparam_grid[2,:5] = hvsr_band_hbox
-    ioparam_grid[3,:5] = peak_freq_range_hbox
-    ioparam_grid[4,:1] = detrend_type_dropdown
-    ioparam_grid[4,1] = detrend_order
-    ioparam_grid[5,:6] = trim_directory
-    ioparam_grid[5, 6:8] = trim_export_dropdown
-    ioparam_grid[5, 8] = trim_directory_upload
-
-    # PYTHON API ACCORDION
-    inputAPI_grid = widgets.GridspecLayout(2, 10)
-    # A text label with "input_params()"
-    input_params_prefix = widgets.HTML(value='<style>p {word-wrap: break-word}</style> <p>' + 'input_params' + '</p>', 
-                                       layout=widgets.Layout(width='fill', justify_content='flex-end',align_content='flex-start'))
-    input_params_call = widgets.HTML(value='<style>p {word-wrap: break-word}</style> <p>' + '()' + '</p>',
-                                     layout=widgets.Layout(width='fill', justify_content='flex-start',align_content='flex-start'),)
-    #input_params_call =  widgets.Label(value='input_params()', layout=widgets.Layout(width='auto'))
-    inputAPI_grid[0, 0] = input_params_prefix
-    inputAPI_grid[0, 1:] = input_params_call
-
-    # A text label with "fetch_data()"
-    fetch_data_prefix = widgets.HTML(value='<style>p {word-wrap: break-word}</style> <p>' + 'fetch_data' + '</p>', 
-                                       layout=widgets.Layout(width='fill', justify_content='flex-end',align_content='flex-start'))
-    fetch_data_call = widgets.HTML(value='<style>p {word-wrap: break-word}</style> <p>' + '()' + '</p>',
-                                     layout=widgets.Layout(width='fill', justify_content='flex-start',align_content='flex-start'),)
-    inputAPI_grid[1, 0] = fetch_data_prefix
-    inputAPI_grid[1, 1:] = fetch_data_call
-
-    # Set it all in place
-    metaLabel = widgets.Label('Instrument', layout=widgets.Layout(height='20%', align_content='center', justify_content='flex-end'))
-    instLabel = widgets.Label('Acquisition', layout=widgets.Layout(height='20%', align_content='center', justify_content='flex-end'))
-    locLabel = widgets.Label('Location', layout=widgets.Layout(height='20%', align_content='center', justify_content='flex-end'))
-    ioparmLabel = widgets.Label('IO/Params', layout=widgets.Layout(height='20%', align_content='center', justify_content='flex-end'))
-    apiLabel = widgets.Label('API Call', layout=widgets.Layout(height='20%', align_content='center', justify_content='flex-end'))
-    input_accordion_label_box.children = [metaLabel, instLabel, locLabel, ioparmLabel, apiLabel]
-    input_accordion_label_box.layout = widgets.Layout(align_content='space-between', width='5%')
-
-    input_accordion.children = [metadata_grid, instrument_grid, location_grid, ioparam_grid, inputAPI_grid]
-    input_accordion.titles = ["Instrument Metadata", "Acquisition Information", "Location Information", "I/O and Parameters", "See Python API Call"]
-    input_accordion_box.layout = widgets.Layout(align_content='space-between', width='99%')
-    
-    input_accordion.layout = widgets.Layout(width='99%')
-
-    # ADD THE REST OF THE WIDGETS AROUND THE ACCORDIONS
-    # A text box for the site name
-    site_name = widgets.Text(description='Site Name:',
-                            value='HVSR_Site',
-                            placeholder='HVSR_Site',
-                            style={'description_width': 'initial'}, layout=widgets.Layout(width='30%'))
-
-    tenpct_spacer = widgets.Button(description='', layout=widgets.Layout(width='20%', visibility='hidden'))
-
-    # Dropdown with different source types 
-    data_source_type = widgets.Dropdown(options=[('File', 'file'), ('Raw', 'raw'), ('Batch', 'batch'), ('Directory', 'dir')],
-                                            description='Data Source type:',
-                                            value='file',orientation='horizontal', 
-                                            style={'description_width': 'initial'},
-                                            layout=widgets.Layout(width='20%'))
-    def on_ds_change(event):
-        if data_source_type.value == 'file' or data_source_type.value== 'batch':
-            browse_data_button.description = 'Select Files'
-        else:
-            browse_data_button.description = 'Select Folders'
-    data_source_type.observe(on_ds_change)
-    # Dropdown labeled "Instrument" with options "Raspberry Shake", "Tromino", "Other"
-    instrument_dropdown = widgets.Dropdown(options=['Raspberry Shake', 'Tromino', 'Other'],
-                                        style={'description_width': 'initial'},
-                                        description='Instrument:',layout=widgets.Layout(width='20%'))
-
-    # Processing Settings
-    processing_settings_button = widgets.FileUpload(accept='.proc', description='Processing Settings',
-                                            multiple=False,layout=widgets.Layout(width='10%'))
-
-    # Whether to show plots outside of widget
-    show_plot_check =  widgets.Checkbox(description='Print Plots', value=False, disabled=False, indent=False,
-                                    layout=widgets.Layout(width='10%', justify_content='flex-end'))
-
-
-    # Whether to print to terminal
-    verbose_check = widgets.Checkbox(description='Verbose', value=False, disabled=False, indent=False,
-                                    layout=widgets.Layout(width='10%', justify_content='flex-end'))
-
-    # A text box labeled Data Filepath
-    data_filepath = widgets.Text(description='Data Filepath:',
-                                    placeholder='sample', value='sample',
-                                    style={'description_width': 'initial'},layout=widgets.Layout(width='70%'))
-
-    # A button next to it labeled "Browse"
-    browse_data_button = widgets.Button(description='Select Files', layout=widgets.Layout(width='10%'))
-    def select_datapath(event):
-        try:
-            root = tk.Tk()
-            root.wm_attributes('-topmost', True)
-            root.withdraw()
-            if data_source_type.value=='file' or data_source_type.value=='batch':
-                data_filepath.value = str(filedialog.askopenfilenames(defaultextension='.MSEED', title='Select Data File'))
-            else:
-                data_filepath.value = str(filedialog.askdirectory(mustexist=True, title="Select Data Directory"))
-            root.destroy()
-        except Exception as e:
-            print(e)
-            browse_data_button.disabled=True
-            browse_data_button.description='Use Text Field'
-    browse_data_button.on_click(select_datapath)
-
-    # A text box labeled Metadata Filepath
-    metadata_filepath = widgets.Text(description='Metadata Filepath:',
-                                        style={'description_width': 'initial'},layout=widgets.Layout(width='70%'))
-
-    # A button next to it labeled "Browse"
-    browse_metadata_button = widgets.Button(description='Select File(s)', layout=widgets.Layout(width='10%'))
-    def select_metapath(event):
-        try:
-            root = tk.Tk()
-            root.wm_attributes('-topmost', True)
-            root.withdraw()
-            metadata_filepath.value = str(filedialog.askopenfilenames(title='Select Metadata File(s)'))
-            root.destroy()
-        except Exception as e:
-            print(e)
-            browse_metadata_button.disabled=True
-            browse_metadata_button.description='Use Text Field'
-    browse_metadata_button.on_click(select_metapath)
-
-    # A progress bar
-    progress_bar = widgets.FloatProgress(value=0.0,min=0.0,max=1.0,
-                                    bar_style='info',
-                                    orientation='horizontal',layout=widgets.Layout(width='85%'))
-
-    # A dark yellow button labeled "Read Data"
-    read_data_button = widgets.Button(description='Read Data',
-                                    button_style='warning',layout=widgets.Layout(width='10%'))
-
-
-    # A forest green button labeled "Process HVSR"
-    process_hvsr_button = widgets.Button(description='Run',
-                                            button_style='success',layout=widgets.Layout(width='5%'))
-
-    # Update input_param call
-    def update_input_param_call():
-        input_param_text = f"""(datapath='{data_filepath.value}', metapath='{metadata_filepath.value}', site='{site_name.value}', network='{network_textbox.value}',
-                    station='{station_textbox.value}', loc='{location_textbox.value}', channels={[z_channel_textbox.value, e_channel_textbox.value, n_channel_textbox.value]},
-                    acq_date='{acquisition_date_picker.value}', starttime='{start_time_picker.value}', endtime='{end_time_picker.value}', tzone='{time_zone_dropdown.value}',
-                    xcoord={xcoord_textbox.value}, ycoord={ycoord_textbox.value}, elevation={zcoord_textbox.value}, depth=0,
-                    input_crs='{input_crs_textbox.value}', output_crs='{output_crs_textbox.value}', elev_unit='{elevation_unit_textbox.value}',
-                    instrument='{instrument_dropdown.value}', hvsr_band={[hvsr_band_min_box.value, hvsr_band_max_box.value]}, 
-                    peak_freq_range={[peak_freq_range_min_box.value, peak_freq_range_max_box.value]}, verbose={verbose_check.value})"""
-        input_params_call.value='<style>p {word-wrap: break-word}</style> <p>' + input_param_text + '</p>'
-    update_input_param_call()
-    
-    # Update fetch_data call
-    def update_fetch_data_call():
-        fetch_data_text = f"""(params=hvsr_data, source={data_source_type.value}, trim_dir={trim_directory.value},
-                            export_format={trim_export_dropdown.value}, detrend={detrend_type_dropdown.value}, detrend_order={detrend_order.value}, verbose={verbose_check.value})"""
-        fetch_data_call.value='<style>p {word-wrap: break-word}</style> <p>' + fetch_data_text + '</p>'
-    update_fetch_data_call()
-
-    site_hbox = widgets.HBox()
-    site_hbox.children = [site_name, tenpct_spacer, tenpct_spacer, tenpct_spacer, tenpct_spacer, tenpct_spacer, show_plot_check, verbose_check]
-    datapath_hbox = widgets.HBox()
-    datapath_hbox.children = [data_filepath, browse_data_button, data_source_type]
-    metadata_hbox = widgets.HBox()
-    metadata_hbox.children = [metadata_filepath, browse_metadata_button, instrument_dropdown]
-    progress_hbox = widgets.HBox()
-    progress_hbox.children = [progress_bar, read_data_button, process_hvsr_button]
-
-    input_params_vbox = widgets.VBox()
-    input_params_vbox.children = [site_hbox,datapath_hbox,metadata_hbox,progress_hbox]
-
-    input_accordion_box.children = [input_accordion]
-    #input_HBox.children = [input_accordion_label_box, input_accordion_box]
-    #input_HBox.layout= widgets.Layout(align_content='space-between')
-
-    # Create a GridBox with 12 rows and 20 columns
-    input_tab = widgets.GridBox(layout=widgets.Layout(grid_template_columns='repeat(10, 1)',
-                                                grid_template_rows='repeat(12, 1)'))
-
-    # Add the VBox to the GridBox
-    input_tab.children = [site_hbox,
-                            datapath_hbox,
-                            metadata_hbox,
-                            input_accordion_box,
-                            progress_hbox]
-
-    def get_input_params():
-        input_params_kwargs={
-            'datapath':data_filepath.value,
-            'metapath':metadata_filepath.value,
-            'site':site_name.value,
-            'instrument':instrument_dropdown.value,
-            'network':network_textbox.value, 'station':station_textbox.value, 'loc':location_textbox.value, 
-            'channels':[z_channel_textbox.value, e_channel_textbox.value, n_channel_textbox.value],
-            'starttime':start_time_picker.value,
-            'endtime':end_time_picker.value,
-            'tzone':time_zone_dropdown.value,
-            'xcoord':xcoord_textbox.value,
-            'ycoord':ycoord_textbox.value,
-            'elevation':zcoord_textbox.value, 'elev_unit':elevation_unit_textbox.value,'depth':0,
-            'input_crs':input_crs_textbox.value,'output_crs':output_crs_textbox.value,
-            'hvsr_band':[hvsr_band_min_box.value, hvsr_band_max_box.value],
-            'peak_freq_range':[peak_freq_range_min_box.value, peak_freq_range_max_box.value]}
-        return input_params_kwargs
-
-    def get_fetch_data_params():
-        fetch_data_kwargs = {
-            'source':data_source_type.value, 
-            'trim_dir':trim_directory.value,
-            'export_format':data_format_dropdown.value,
-            'detrend':detrend_type_dropdown.value,
-            'detrend_order':detrend_order.value}
-        if str(fetch_data_kwargs['detrend']).lower() == 'none':
-            fetch_data_kwargs['detrend'] = None
-        
-        if str(fetch_data_kwargs['trim_dir']).lower() == 'none':
-            fetch_data_kwargs['trim_dir'] = None
-        return fetch_data_kwargs
-
-    def read_data(button):
-        progress_bar.value = 0
-        log_textArea.value += f"\n\nREADING DATA [{datetime.datetime.now()}]"
-
-        ip_kwargs = get_input_params()
-        hvsr_data = sprit_hvsr.input_params(**ip_kwargs, verbose=verbose_check.value)
-        log_textArea.value += f"\n\n{datetime.datetime.now()}\ninput_params():\n'{ip_kwargs}"
-        if button.description=='Read Data':
-            progress_bar.value=0.333
-        else:
-            progress_bar.value=0.1
-        fd_kwargs = get_fetch_data_params()
-        hvsr_data = sprit_hvsr.fetch_data(hvsr_data, **fd_kwargs, verbose=verbose_check.value)
-        log_textArea.value += '\n\n'+str(datetime.datetime.now())+'\nfetch_data():\n\t'+str(fd_kwargs)
-        if button.description=='Read Data':
-            progress_bar.value=0.666
-        else:
-            progress_bar.value=0.2
-        
-        use_hv_curve_rmse.value=False
-        use_hv_curve_rmse.disabled=True
-
-        update_preview_fig(hvsr_data, preview_fig)
-
-        if button.description=='Read Data':
-            sprit_tabs.selected_index = 1
-            progress_bar.value=0
-        return hvsr_data
-    
-    read_data_button.on_click(read_data)
-
-    def get_remove_noise_kwargs():
-        def get_remove_method():
-            remove_method_list=[]
-            do_stalta = stalta_check.value
-            do_sat_pct = max_saturation_check.value
-            do_noiseWin=noisy_windows_check.value
-            do_warmcool=warmcool_check.value
-            
-            if auto_remove_check.value:
-                remove_method_list=['stalta', 'saturation', 'noise', 'warmcool']
-            else:
-                if do_stalta:
-                    remove_method_list.append('stalta')
-                if do_sat_pct:
-                    remove_method_list.append('saturation')
-                if do_noiseWin:
-                    remove_method_list.append('noise')
-                if do_warmcool:
-                    remove_method_list.append('warmcool')
-            
-            if not remove_method_list:
-                remove_method_list = None
-            return remove_method_list
-        
-        remove_noise_kwargs = {'remove_method':get_remove_method(),
-                                'sat_percent':max_saturation_pct.value, 
-                                'noise_percent':max_window_pct.value,
-                                'sta':sta.value,
-                                'lta':lta.value, 
-                                'stalta_thresh':[stalta_thresh_low.value, stalta_thresh_hi.value], 
-                                'warmup_time':warmup_time.value,
-                                'cooldown_time':cooldown_time.value,
-                                'min_win_size':noisy_window_length.value,
-                                'remove_raw_noise':raw_data_remove_check.value,
-                                'verbose':verbose_check.value}
-        return remove_noise_kwargs
-
-    def get_generate_ppsd_kwargs():
-        ppsd_kwargs = {
-            'skip_on_gaps':skip_on_gaps.value,
-            'db_bins':[db_bins_min.value, db_bins_max.value, db_bins_step.value],
-            'ppsd_length':ppsd_length.value,
-            'overlap':overlap_pct.value,
-            'special_handling':special_handling_dropdown.value,
-            'period_smoothing_width_octaves':period_smoothing_width.value,
-            'period_step_octaves':period_step_octave.value,
-            'period_limits':[period_limits_min.value, period_limits_max.value],
-            'verbose':verbose_check.value
-            }
-
-        if str(ppsd_kwargs['special_handling']).lower() == 'none':
-            ppsd_kwargs['special_handling'] = None        
-        return ppsd_kwargs
-
-    def get_remove_outlier_curve_kwargs():
-        roc_kwargs = {
-                'use_percentile':rmse_pctile_check.value,
-                'rmse_thresh':rmse_thresh.value,
-                'use_hv_curve':False,
-                'verbose':verbose_check.value
-            }
-        return roc_kwargs
-
-    def get_process_hvsr_kwargs():
-        if smooth_hv_curve_bool.value:
-            smooth_value = smooth_hv_curve.value
-        else:
-            smooth_value = smooth_hv_curve_bool.value
-
-        if resample_hv_curve_bool.value:
-            resample_value = resample_hv_curve.value
-        else:
-            resample_value = resample_hv_curve_bool.value
-
-        ph_kwargs={'method':h_combine_meth.value,
-                    'smooth':smooth_value,
-                    'freq_smooth':freq_smoothing.value,
-                    'f_smooth_width':freq_smooth_width.value,
-                    'resample':resample_value,
-                    'outlier_curve_rmse_percentile':use_hv_curve_rmse.value,
-                    'verbose':verbose_check.value}
-        return ph_kwargs
-
-    def get_check_peaks_kwargs():
-        cp_kwargs = {'hvsr_band':[hvsr_band_min_box.value, hvsr_band_max_box.value],
-                    'peak_freq_range':[peak_freq_range_min_box.value, peak_freq_range_max_box.value],
-                    'peak_selection':peak_selection_type.value,
-                    'verbose':verbose_check.value}
-        return cp_kwargs
-
-    def get_get_report_kwargs():
-        def get_formatted_plot_str():
-            # Initialize plot string
-            hvsr_plot_str = ''
-            comp_plot_str = ''
-            spec_plot_str = ''
-
-            # Whether to use each plot
-            if use_plot_hv.value:
-                hvsr_plot_str=hvsr_plot_str + "HVSR"
-            if use_plot_comp.value:
-                comp_plot_str=comp_plot_str + "C"
-            if use_plot_spec.value:
-                spec_plot_str=spec_plot_str + "SPEC"
-
-            # Whether components should be on the same plot as the HV curve
-            if not combine_hv_comp.value:
-                comp_plot_str=comp_plot_str + "+"
-            else:
-                comp_plot_str=comp_plot_str.replace('+','')
-
-            # Whether to show (log) standard deviations
-            if not show_std_hv.value:
-                hvsr_plot_str=hvsr_plot_str + " -s"
-            if not show_std_comp.value:
-                comp_plot_str=comp_plot_str + " -s"                
-
-            # Whether to show all peaks
-            if show_all_peaks_hv.value:
-                hvsr_plot_str=hvsr_plot_str + " all"
-
-            # Whether curves from each time window are shown
-            if show_all_curves_hv.value:
-                hvsr_plot_str=hvsr_plot_str + " t"
-            if show_all_curves_comp.value:
-                comp_plot_str=comp_plot_str + " t"
-
-            # Whether the best peak is displayed
-            if show_best_peak_hv.value:
-                hvsr_plot_str=hvsr_plot_str + " p"
-            if show_best_peak_comp.value:
-                comp_plot_str=comp_plot_str + " p"
-            if show_best_peak_spec.value:
-                spec_plot_str=spec_plot_str + " p"
-
-            # Whether best peak value is annotated
-            if ann_best_peak_hv.value:
-                hvsr_plot_str=hvsr_plot_str + " ann"
-            if ann_best_peak_comp.value:
-                comp_plot_str=comp_plot_str + " ann"
-            if ann_best_peak_spec.value:
-                spec_plot_str=spec_plot_str + " ann"
-
-            # Whether peaks from individual time windows are shown
-            if show_ind_peaks_hv.value:
-                hvsr_plot_str=hvsr_plot_str + " tp"
-            if show_ind_peaks_spec.value:
-                spec_plot_str=spec_plot_str + ' tp'
-            
-            # Whether to show legend
-            if show_legend_hv.value:
-                hvsr_plot_str=hvsr_plot_str + " leg"
-            if show_legend_comp.value:
-                comp_plot_str=comp_plot_str + " leg"
-            if show_legend_spec.value:
-                spec_plot_str=spec_plot_str + " leg"            
-
-            # Combine string into one
-            plot_str = hvsr_plot_str + ' ' + comp_plot_str+ ' ' + spec_plot_str
-            return plot_str
-
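The returned string is a small space-separated grammar: up to three groups (HVSR, C or C+, SPEC), each followed by its flags ('-s', 'all', 't', 'tp', 'p', 'ann', 'leg'). A hypothetical composition with most options enabled:

plot_str = "HVSR p ann t leg C+ p ann SPEC p ann leg"  # hypothetical output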
-        gr_kwargs = {'report_format':['print','csv'],
-                     'plot_type':get_formatted_plot_str(),
-                     'export_path':None,
-                     'csv_overwrite_opt':'overwrite',
-                     'no_output':False,
-                    'verbose':verbose_check.value
-                     }
-        return gr_kwargs
-
-    def process_data(button):
-        startProc=datetime.datetime.now()
-        progress_bar.value = 0
-        log_textArea.value += f"\n\nPROCESSING DATA [{startProc}]"
-        global hvsr_data
-        # Read data again only if internal hvsr_data datapath variable is different from what is in the gui
-        if 'hvsr_data' not in globals() or not hasattr(hvsr_data, 'datapath') or \
-                (pathlib.Path(hvsr_data.datapath).as_posix() != pathlib.Path(data_filepath.value).as_posix()):
-            hvsr_data = read_data(button)
-
-        remove_noise_kwargs = get_remove_noise_kwargs()
-        hvsr_data = sprit_hvsr.remove_noise(hvsr_data, **remove_noise_kwargs)
-        log_textArea.value += f"\n\n{datetime.datetime.now()}\nremove_noise()\n\t{remove_noise_kwargs}"
-        progress_bar.value = 0.3
-
-        generate_ppsd_kwargs = get_generate_ppsd_kwargs()
-        hvsr_data = sprit_hvsr.generate_ppsds(hvsr_data, **generate_ppsd_kwargs)
-        progress_bar.value = 0.5
-        log_textArea.value += f"\n\n{datetime.datetime.now()}\ngenerate_ppsds()\n\t{generate_ppsd_kwargs}"
-        
-       
-        # If this was started by clicking "Generate PPSDs", stop here
-        if button.description == 'Generate PPSDs':
-            return
-
-        ph_kwargs = get_process_hvsr_kwargs()
-        hvsr_data = sprit_hvsr.process_hvsr(hvsr_data, **ph_kwargs)
-        log_textArea.value += f"\n\n{datetime.datetime.now()}\nprocess_hvsr()\n\t{ph_kwargs}"
-        progress_bar.value = 0.75
-        update_outlier_fig()
-
-        roc_kwargs = get_remove_outlier_curve_kwargs()
-        hvsr_data = sprit_hvsr.remove_outlier_curves(hvsr_data, **roc_kwargs)
-        log_textArea.value += f"\n\n{datetime.datetime.now()}\nremove_outlier_curves()\n\t{roc_kwargs}"
-        progress_bar.value = 0.85
-        outlier_fig, hvsr_data = update_outlier_fig()
-
-        use_hv_curve_rmse.value=False
-        use_hv_curve_rmse.disabled=False
-
-        def get_rmse_range():
-            minRMSE = 10000
-            maxRMSE = -1
-            if roc_kwargs['use_hv_curve']:
-                colnames = ['HV_Curves']
-            else:
-                colnames = ['psd_values_Z',
-                            'psd_values_E',
-                            'psd_values_N']
-            dataList = []
-            for col in colnames:
-                dataArr = np.stack(hvsr_data.hvsr_df[col])
-                medCurveArr = np.nanmedian(dataArr, axis=0)
-                rmse = np.sqrt(((np.subtract(dataArr, medCurveArr)**2).sum(axis=1))/dataArr.shape[1])
-                if rmse.min() < minRMSE:
-                    minRMSE = rmse.min()
-                if rmse.max() > maxRMSE:
-                    maxRMSE = rmse.max()
-            rmse_thresh_slider.min = minRMSE
-            rmse_thresh_slider.max = maxRMSE
-            rmse_thresh_slider.step = round((maxRMSE-minRMSE)/100, 2)
-            rmse_thresh_slider.value = maxRMSE
-        get_rmse_range()
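The slider bounds come from the same RMSE-against-the-median-curve metric computed in get_rmse_range above; a minimal sketch with synthetic curves:

import numpy as np

data = np.random.default_rng(0).normal(size=(30, 256))  # hypothetical curves
med_curve = np.nanmedian(data, axis=0)
rmse = np.sqrt(((np.subtract(data, med_curve)**2).sum(axis=1)) / data.shape[1])
print(rmse.min(), rmse.max())  # used as the slider's min and max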
-
-        cp_kwargs = get_check_peaks_kwargs()
-        hvsr_data = sprit_hvsr.check_peaks(hvsr_data, **cp_kwargs)
-        log_textArea.value += f"\n\n{datetime.datetime.now()}\ncheck_peaks()\n\t{cp_kwargs}"
-        progress_bar.value = 0.9
-
-        gr_kwargs = get_get_report_kwargs()
-        hvsr_data = sprit_hvsr.get_report(hvsr_data, **gr_kwargs)
-        log_textArea.value += f"\n\n{datetime.datetime.now()}\nget_report()\n\t{gr_kwargs}\n\n"
-        hvsr_data.get_report(report_format='print') # Just in case print wasn't included
-        log_textArea.value += hvsr_data['Print_Report']
-        printed_results_textArea.value = hvsr_data['Print_Report']
-        hvsr_data.get_report(report_format='csv') 
-        results_table.value = hvsr_data['CSV_Report'].to_html()
-        
-        log_textArea.value += f'Processing time: {datetime.datetime.now() - startProc}'
-        progress_bar.value = 0.95
-
-        update_results_fig(hvsr_data, gr_kwargs['plot_type'])
-        
-        progress_bar.value = 1
-        global hvsr_results
-        hvsr_results = hvsr_data
-        return hvsr_results
-        
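For reference, the chain that process_data drives is the plain sprit API pipeline; a sketch, where each kwargs dict is the one assembled by the corresponding get_*_kwargs helper above:

hvsr_data = sprit_hvsr.input_params(**ip_kwargs)
hvsr_data = sprit_hvsr.fetch_data(hvsr_data, **fd_kwargs)
hvsr_data = sprit_hvsr.remove_noise(hvsr_data, **remove_noise_kwargs)
hvsr_data = sprit_hvsr.generate_ppsds(hvsr_data, **generate_ppsd_kwargs)
hvsr_data = sprit_hvsr.process_hvsr(hvsr_data, **ph_kwargs)
hvsr_data = sprit_hvsr.remove_outlier_curves(hvsr_data, **roc_kwargs)
hvsr_data = sprit_hvsr.check_peaks(hvsr_data, **cp_kwargs)
hvsr_data = sprit_hvsr.get_report(hvsr_data, **gr_kwargs)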
-    def parse_plot_string(plot_string):
-        plot_list = plot_string.split()
-
-        hvsrList = ['hvsr', 'hv', 'h']
-        compList = ['component', 'comp', 'c']
-        compPlus = [item + '+' for item in compList]
-        specList = ['spectrogram', 'specgram', 'spec','sg', 's']
-
-        hvInd = np.nan
-        compInd = np.nan
-        specInd = np.nan
-
-        hvIndFound = False
-        compIndFound = False
-        specIndFound = False
-
-        for i, item in enumerate(plot_list):
-            if item.lower() in hvsrList and not hvIndFound:
-                # assign the index
-                hvInd = i
-                hvIndFound = True
-            if (item.lower() in compList or item.lower() in compPlus) and not compIndFound:
-                # assign the index
-                compInd = i
-                compIndFound = True
-            if item.lower() in specList and not specIndFound:
-                # assign the index
-                specInd = i
-                specIndFound = True
-
-        # Get individual plot lists (should already be correctly ordered)
-        if hvInd is np.nan:
-            hvsr_plot_list = ['HVSR']
-
-        if compInd is np.nan:
-            comp_plot_list = []
-            if specInd is np.nan:
-                if hvInd is not np.nan:
-                    hvsr_plot_list = plot_list
-                spec_plot_list = []
-            else:
-                if hvInd is not np.nan:
-                    hvsr_plot_list = plot_list[hvInd:specInd]
-                spec_plot_list = plot_list[specInd:]
-        else:
-            if hvInd is not np.nan:
-                hvsr_plot_list = plot_list[hvInd:compInd]
-            
-            if specInd is np.nan:
-                comp_plot_list = plot_list[compInd:]
-                spec_plot_list = []
-            else:
-                comp_plot_list = plot_list[compInd:specInd]
-                spec_plot_list = plot_list[specInd:]
-
-        # Figure out how many subplots there will be
-        plot_list_list = [hvsr_plot_list, comp_plot_list, spec_plot_list]
-
-        return plot_list_list
-
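A worked example of the index logic above:

plot_list = "HVSR p ann C+ p SPEC".split()
# 'hvsr' matches at index 0, 'c+' at index 3, 'spec' at index 5, so:
hvsr_plot_list = plot_list[0:3]  # ['HVSR', 'p', 'ann']
comp_plot_list = plot_list[3:5]  # ['C+', 'p']
spec_plot_list = plot_list[5:]   # ['SPEC']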
-    def parse_hv_plot_list(hv_data, hvsr_plot_list):
-        hvsr_data = hv_data
-        x_data = hvsr_data.x_freqs['Z']
-        hvsrDF = hvsr_data.hvsr_df
-
-        if 'tp' in hvsr_plot_list:
-            allpeaks = []
-            for row in hvsrDF[hvsrDF['Use']]['CurvesPeakFreqs'].values:
-                for peak in row:
-                    allpeaks.append(peak)
-            allInd = []
-            for row, peakList in enumerate(hvsrDF[hvsrDF['Use']]['CurvesPeakIndices'].values):
-                for ind in peakList:
-                    allInd.append((row, ind))
-            x_vals = []
-            y_vals = []
-            y_max = np.nanmax(hvsr_data.hvsrp)
-            hvCurveInd = list(hvsrDF.columns).index('HV_Curves')
-            for i, tp in enumerate(allpeaks):
-                x_vals.extend([tp, tp, None]) # add two x values and a None
-                y_vals.extend([0, hvsrDF.iloc[allInd[i][0], hvCurveInd][allInd[i][1]], None]) # add the first and last y values and a None            
-
-            results_fig.add_trace(go.Scatter(x=x_vals, y=y_vals, mode='lines',
-                                            line=dict(width=4, dash="solid", 
-                                            color="rgba(128,0,0,0.1)"), 
-                                            name='Best Peaks Over Time'),
-                                            row=1, col=1)
-
-        if 't' in hvsr_plot_list:
-            alltimecurves = np.stack(hvsrDF[hvsrDF['Use']]['HV_Curves'])
-            for i, row in enumerate(alltimecurves):
-                if i==0:
-                    showLeg = True
-                else:
-                    showLeg= False
-                results_fig.add_trace(go.Scatter(x=x_data[:-1], y=row, mode='lines',
-                                            line=dict(width=0.5, dash="solid", 
-                                            color="rgba(100, 110, 100, 0.8)"), 
-                                            showlegend=showLeg, 
-                                            name='Ind. time win. curve', 
-                                            hoverinfo='none'),
-                                            row=1, col=1)
-
-        if 'all' in hvsr_plot_list:
-            for i, p in enumerate(hvsr_data['hvsr_peak_freqs']):
-                if i==0:
-                    showLeg = True
-                else:
-                    showLeg= False
-
-                results_fig.add_trace(go.Scatter(
-                    x=[p, p, None], # trailing None separates this segment from the next
-                    y=[0, np.nanmax(np.stack(hvsrDF['HV_Curves'])),None],
-                    mode="lines", # set mode to lines
-                    line=dict(width=1, dash="dot", color="gray"), # set line properties
-                    name="All checked peaks", # set legend name
-                    showlegend=showLeg),
-                    row=1, col=1)
-
-        if '-s' not in hvsr_plot_list:
-            # Show standard deviation
-            results_fig.add_trace(go.Scatter(x=x_data, y=hvsr_data.hvsrp2,
-                                    line={'color':'black', 'width':0.1},marker=None, 
-                                    showlegend=False, name='Log. St.Dev. Upper',
-                                    hoverinfo='none'),
-                                    row=1, col=1)
-            
-            results_fig.add_trace(go.Scatter(x=x_data, y=hvsr_data.hvsrm2,
-                                    line={'color':'black', 'width':0.1},marker=None,
-                                    fill='tonexty', fillcolor="rgba(128, 128, 128, 0.6)",
-                                    name='Log. St.Dev.', hoverinfo='none'),
-                                    row=1, col=1)
-                
-        if 'p' in hvsr_plot_list:
-            results_fig.add_trace(go.Scatter(
-                x=[hvsr_data['BestPeak']['f0'], hvsr_data['BestPeak']['f0'], None], # trailing None terminates the segment
-                y=[0,np.nanmax(np.stack(hvsrDF['HV_Curves'])),None],
-                mode="lines", # set mode to lines
-                line=dict(width=1, dash="dash", color="black"), # set line properties
-                name="Best Peak"),
-                row=1, col=1)
-
-        if 'ann' in hvsr_plot_list:
-            # Annotate best peak
-            results_fig.add_annotation(x=np.log10(hvsr_data['BestPeak']['f0']),
-                                    y=0, yanchor='bottom', xanchor='center',
-                                    text=f"{hvsr_data['BestPeak']['f0']:.3f} Hz",
-                                    bgcolor='rgba(255, 255, 255, 0.7)',
-                                    showarrow=False,
-                                    row=1, col=1)
-        return results_fig
-
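The None-separated coordinate lists used above for the per-window peak markers let a single Scatter trace draw many disjoint vertical segments; a minimal sketch with hypothetical peaks:

import plotly.graph_objects as go

peaks, y_max = [1.2, 3.4, 7.8], 5.0  # hypothetical peak frequencies and height
x_vals, y_vals = [], []
for p in peaks:
    x_vals += [p, p, None]  # None breaks the line between segments
    y_vals += [0, y_max, None]
fig = go.Figure(go.Scatter(x=x_vals, y=y_vals, mode='lines'))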
-    def parse_comp_plot_list(hv_data, comp_plot_list):
-        
-        hvsr_data = hv_data
-        # Initial setup
-        x_data = hvsr_data.x_freqs['Z']
-        hvsrDF = hvsr_data.hvsr_df
-        same_plot = ((comp_plot_list != []) and ('+' not in comp_plot_list[0]))
-
-        if same_plot:
-            yaxis_to_use = 'y2'
-            use_secondary = True
-            transparency_modifier = 0.5
-        else:
-            yaxis_to_use = 'y'
-            use_secondary=False
-            transparency_modifier = 1
-
-        alpha = 0.4 * transparency_modifier
-        components = ['Z', 'E', 'N']
-        compColor_semi_light = {'Z':f'rgba(128,128,128,{alpha})',
-                    'E':f'rgba(0,0,128,{alpha})',
-                    'N':f'rgba(128,0,0,{alpha})'}
-
-        alpha = 0.7 * transparency_modifier
-        compColor_semi = {'Z':f'rgba(128,128,128,{alpha})', 
-                        'E':f'rgba(100,100,128,{alpha})', 
-                        'N':f'rgba(128,100,100,{alpha})'}
-
-        compColor = {'Z':f'rgba(128,128,128,{alpha})', 
-                    'E':f'rgba(100,100,250,{alpha})', 
-                    'N':f'rgba(250,100,100,{alpha})'}
-
-        # Whether to plot in new subplot or not
-        if  comp_plot_list != [] and '+' in comp_plot_list[0]:
-            compRow=2
-        else:
-            compRow=1
-
-        # Whether to plot individual time curves
-        if 't' in comp_plot_list:
-            for comp in components:
-                alltimecurves = np.stack(hvsrDF[hvsrDF['Use']]['psd_values_'+comp])
-                for i, row in enumerate(alltimecurves):
-                    # Only the first time-window curve per component gets a legend entry
-                    showLeg = (i == 0)
-
-                    results_fig.add_trace(go.Scatter(x=x_data[:-1], y=row, mode='lines',
-                                    line=dict(width=0.5, dash="solid", 
-                                    color=compColor_semi[comp]),
-                                    name='Ind. time win. curve',
-                                    showlegend=showLeg,
-                                    hoverinfo='none',
-                                    yaxis=yaxis_to_use),
-                                    secondary_y=use_secondary,
-                                    row=compRow, col=1)
-
-        # Code to plot standard deviation windows, if not removed
-        if '-s' not in comp_plot_list:
-            for comp in components:
-                # Show standard deviation
-                results_fig.add_trace(go.Scatter(x=x_data, y=hvsr_data.ppsd_std_vals_p[comp],
-                                        line={'color':compColor_semi_light[comp], 'width':0.1},marker=None, 
-                                        showlegend=False, name='Log. St.Dev. Upper',
-                                        hoverinfo='none',    
-                                        yaxis=yaxis_to_use),
-                                        secondary_y=use_secondary,
-                                        row=compRow, col=1)
-                
-                results_fig.add_trace(go.Scatter(x=x_data, y=hvsr_data.ppsd_std_vals_m[comp],
-                                        line={'color':compColor_semi_light[comp], 'width':0.1},marker=None,
-                                        fill='tonexty', fillcolor=compColor_semi_light[comp],
-                                        name=f'St.Dev. [{comp}]', hoverinfo='none', showlegend=False, 
-                                        yaxis=yaxis_to_use),
-                                        secondary_y=use_secondary,
-                                        row=compRow, col=1)
-                
-        # Code to plot location of best peak
-        if 'p' in comp_plot_list:
-            minVal = 10000
-            maxVal = -10000
-            for comp in components:
-                currPPSDCurve = hvsr_data['psd_values_tavg'][comp]
-                if np.nanmin(currPPSDCurve) < minVal:
-                    minVal = np.nanmin(currPPSDCurve)
-                if np.nanmax(currPPSDCurve) > maxVal:
-                    maxVal = np.nanmax(currPPSDCurve)
-
-            results_fig.add_trace(go.Scatter(
-                x=[hvsr_data['BestPeak']['f0'], hvsr_data['BestPeak']['f0'], None],  # trailing None separates segments
-                y=[minVal, maxVal, None],
-                mode="lines",
-                line=dict(width=1, dash="dash", color="black"),
-                name="Best Peak",
-                yaxis=yaxis_to_use),
-                secondary_y=use_secondary,
-                row=compRow, col=1)
-            
-        # Code to annotate value of best peak
-        if 'ann' in comp_plot_list:
-            minVal = 10000
-            for comp in components:
-                currPPSDCurve = hvsr_data['psd_values_tavg'][comp]
-                if np.nanmin(currPPSDCurve) < minVal:
-                    minVal = np.nanmin(currPPSDCurve)
-            results_fig.add_annotation(x=np.log10(hvsr_data['BestPeak']['f0']),
-                            y=minVal,
-                            text=f"{hvsr_data['BestPeak']['f0']:.3f} Hz",
-                            bgcolor='rgba(255, 255, 255, 0.7)',
-                            showarrow=False,
-                            yref=yaxis_to_use,
-                            secondary_y=use_secondary,
-                            row=compRow, col=1)
-
-        # Plot the main averaged component PPSDs
-        for comp in components:
-            results_fig.add_trace(go.Scatter(x=hvsr_data.x_freqs[comp],
-                                            y=hvsr_data['psd_values_tavg'][comp],
-                                            line=dict(width=2, dash="solid", 
-                                            color=compColor[comp]),marker=None, 
-                                            name='PPSD Curve '+comp,    
-                                            yaxis=yaxis_to_use), 
-                                            secondary_y=use_secondary,
-                                            row=compRow, col='all')
-
-        # If new subplot, update accordingly
-        if compRow==2:
-            results_fig.update_xaxes(type='log',
-                            range=[np.log10(hvsr_data['hvsr_band'][0]), np.log10(hvsr_data['hvsr_band'][1])],
-                            row=compRow, col=1)
-        return results_fig
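-    # For reference: when components share the H/V subplot, parse_comp_plot_list draws
-    # them on a secondary y-axis. A minimal sketch of that plotly pattern (assuming
-    # plotly is installed; the data values are illustrative):
-    #
-    #   from plotly import subplots
-    #   import plotly.graph_objects as go
-    #   fig = subplots.make_subplots(rows=1, cols=1, specs=[[{'secondary_y': True}]])
-    #   fig.add_trace(go.Scatter(y=[1, 2, 3], name='H/V'), secondary_y=False)
-    #   fig.add_trace(go.Scatter(y=[-140, -130, -120], name='PPSD'), secondary_y=True)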
-
-    def parse_spec_plot_list(hv_data, spec_plot_list, subplot_num):
-        hvsr_data = hv_data
-        # Initial setup
-        hvsrDF = hvsr_data.hvsr_df
-        specAxisTimes = np.array([dt.isoformat() for dt in hvsrDF.index.to_pydatetime()])
-        y_data = hvsr_data.x_freqs['Z'][1:]
-        image_data = np.stack(hvsrDF['HV_Curves']).T
-
-        maxZ = np.percentile(image_data, 100)
-        minZ = np.percentile(image_data, 0)
-
-        use_mask = hvsr_data.hvsr_df.Use.values
-        use_mask = np.tile(use_mask, (image_data.shape[0], 1))
-        # False -> 0 maps to the dark end of the overlay colorscale (unused windows),
-        # True -> 1 maps to the transparent end. The original
-        # np.where(use_mask is False, ...) was a no-op: `is` checks identity, not values.
-        use_mask = use_mask.astype(float)
-
-        hmap = go.Heatmap(z=image_data,
-                    x=specAxisTimes,
-                    y=y_data,
-                    colorscale='Turbo',
-                    showlegend=False,
-                    #opacity=0.7,
-                    hovertemplate='Time [UTC]: %{x}<br>Frequency [Hz]: %{y:.2f}<br>H/V Amplitude: %{z:.2f}<extra></extra>',
-                    zmin=minZ,zmax=maxZ, showscale=False, name='HV Curve Amp. over Time')
-        results_fig.add_trace(hmap, row=subplot_num, col=1)
-
-        data_used = go.Heatmap(
-            x=specAxisTimes,
-            y=y_data,
-            z=use_mask,
-            showlegend=False,
-            colorscale=[[0, 'rgba(0,0,0,0.66)'], [0.25, 'rgba(0,0,0,0.66)'], [1, 'rgba(250,250,250,0)']],
-            showscale=False, name='Used')
-        results_fig.add_trace(data_used, row=subplot_num, col=1)
-
-
-        # tp currently is not being added to spec_plot_list
-        if 'tp' in spec_plot_list:
-            yvals = []
-            for row in hvsrDF['HV_Curves'].values:
-                maxInd = np.argmax(row)
-                yvals.append(y_data[maxInd])
-            tp_trace = go.Scatter(x=specAxisTimes, y=yvals, mode='markers',
-                                    line=None, marker=dict(color='white', size=2, line=dict(color='black', width=0.1)), name='Individual H/V Peaks')
-            results_fig.add_trace(tp_trace, row=subplot_num, col='all')
-
-        if 'p' in spec_plot_list:
-            results_fig.add_hline(y=hvsr_data['BestPeak']['f0'], line_width=1, line_dash='dash', line_color='black', row=subplot_num, col='all')
-
-        if 'ann' in spec_plot_list:
-            results_fig.add_annotation(x=specAxisTimes[-1],
-                                    y=hvsr_data['hvsr_band'][1],
-                                    text=f"Peak: {hvsr_data['BestPeak']['f0']:.3f} Hz",
-                                    bgcolor='rgba(255, 255, 255, 0.7)',
-                                    showarrow=False, xanchor='right', yanchor='top',
-                                    row=subplot_num, col='all')
-
-        if 'leg' in spec_plot_list:
-            pass  # legend display is not implemented for the spectrogram subplot
-
-        results_fig.update_yaxes(type='log',
-                        range=[np.log10(hvsr_data['hvsr_band'][0]), np.log10(hvsr_data['hvsr_band'][1])],
-                        row=subplot_num, col=1)
-
-        results_fig.add_annotation(
-            text=f"{hvsrDF['Use'].sum()}/{hvsrDF.shape[0]} windows used",
-            x=max(specAxisTimes),
-            y=np.log10(min(y_data)) + (np.log10(max(y_data)) - np.log10(min(y_data))) * 0.01,
-            xanchor="right", yanchor="bottom", bgcolor='rgba(255,255,255,0.7)',  # RGB channels max out at 255
-            showarrow=False, row=subplot_num, col=1)
-
-        return results_fig
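-    # For reference: the "Used" overlay above works because z=0 (unused windows) maps to
-    # a dark, semi-opaque color while z=1 (used windows) maps to a fully transparent one.
-    # A minimal sketch of the idea (values are illustrative):
-    #
-    #   import numpy as np
-    #   import plotly.graph_objects as go
-    #   mask = np.array([[1.0, 0.0, 1.0]])  # middle column unused -> shaded dark
-    #   fig = go.Figure(go.Heatmap(z=mask, zmin=0, zmax=1, showscale=False,
-    #                              colorscale=[[0, 'rgba(0,0,0,0.66)'],
-    #                                          [1, 'rgba(250,250,250,0)']]))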
-
-    def update_results_fig(hv_data, plot_string):
-        global results_fig
-        global results_subp
-        hvsr_data = hv_data
-
-        if isinstance(hvsr_data, sprit_hvsr.HVSRBatch):
-            hvsr_data=hvsr_data[0]
-
-        hvsrDF = hvsr_data.hvsr_df
-
-        plot_list = parse_plot_string(plot_string)
-
-        combinedComp=False
-        noSubplots = 3 - plot_list.count([])
-        if plot_list[1] != [] and '+' not in plot_list[1][0]:
-            combinedComp = True
-            noSubplots -= 1
-        
-        # Get all data for each plotted item
-        # COMP Plot
-        # Figure out which subplot is which
-        if combinedComp:
-            comp_plot_row = 1
-            spec_plot_row = 2
-        else:
-            comp_plot_row = 2
-            spec_plot_row = 3
-
-        # Re-initialize results_fig: clear the old widget, then rebuild it from a fresh
-        # subplot grid (layout updates on the old widget would be discarded by the rebuild)
-        results_fig.data = []
-        if not combinedComp: 
-            results_subp = subplots.make_subplots(rows=3, cols=1, horizontal_spacing=0.01, vertical_spacing=0.07,
-                                                row_heights=[2, 1.5, 1])
-        else:
-            results_subp = subplots.make_subplots(rows=2, cols=1, horizontal_spacing=0.01, vertical_spacing=0.07,
-                                    specs=[[{'secondary_y': True}],
-                                           [{'secondary_y': False}]],
-                                    row_heights=[1, 1])
-        results_fig = go.FigureWidget(results_subp)
-
-        results_fig = parse_comp_plot_list(hvsr_data, comp_plot_list=plot_list[1])
-
-        # HVSR Plot (plotted after COMP so it sits on top of the component curves and is not removed when 'c+' is absent)
-        results_fig = parse_hv_plot_list(hvsr_data, hvsr_plot_list=plot_list[0])
-        # Will always plot the HV Curve
-        results_fig.add_trace(go.Scatter(x=hvsr_data.x_freqs['Z'],y=hvsr_data.hvsr_curve,
-                            line={'color':'black', 'width':1.5},marker=None, name='HVSR Curve'),
-                            row=1, col='all')
-
-        # SPEC plot
-        results_fig = parse_spec_plot_list(hvsr_data, spec_plot_list=plot_list[2], subplot_num=spec_plot_row)
-
-        # Final figure updating
-        showtickLabels = (plot_list[1] == [] or '+' not in plot_list[1][0])
-        results_fig.update_xaxes(type='log',
-                        range=[np.log10(hvsr_data['hvsr_band'][0]), np.log10(hvsr_data['hvsr_band'][1])],
-                        side='top',
-                        row=1, col=1)
-        
-        results_fig.update_xaxes(type='log',overlaying='x',
-                        range=[np.log10(hvsr_data['hvsr_band'][0]), np.log10(hvsr_data['hvsr_band'][1])],
-                        side='bottom',
-                        row=1, col=1)
-        if comp_plot_row!=1:
-            results_fig.update_xaxes(showticklabels=showtickLabels, row=comp_plot_row, col=1)
-        
-        if preview_fig.layout.width is None:
-            if outlier_fig.layout.width is None:
-                chartwidth = 800
-            else:
-                chartwidth = outlier_fig.layout.width
-
-        else:
-            chartwidth = preview_fig.layout.width
-
-        results_fig.update_layout(margin={"l":10, "r":10, "t":35, 'b':0},
-                                showlegend=False, autosize=True, height = 1.2 * float(chartwidth),
-                                title=f"{hvsr_data['site']} Results")
-        results_fig.update_yaxes(title_text='H/V Ratio', row=1, col=1)
-        results_fig.update_yaxes(title_text='H/V Over Time', row=noSubplots, col=1)
-        if comp_plot_row==1:
-            results_fig.update_yaxes(title_text="PPSD Amp\n[m2/s4/Hz][dB]", secondary_y=True, row=comp_plot_row, col=1)
-        else:
-            results_fig.update_yaxes(title_text="PPSD Amp\n[m2/s4/Hz][dB]", row=comp_plot_row, col=1)
-        
-        # Reset results_graph_widget and display 
-        with results_graph_widget:
-            clear_output(wait=True)
-            display(results_fig)
-
-        if show_plot_check.value:
-            results_fig.show()
-
-
-        sprit_tabs.selected_index = 4
-        log_textArea.value += f"\n\n{datetime.datetime.now()}\nResults Figure Updated: {plot_string}"
-      
-    process_hvsr_button.on_click(process_data)
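-    # For reference: ipywidgets Button.on_click calls its handler with the Button
-    # instance as the single positional argument, which is why the callbacks wired
-    # to buttons here take exactly one parameter.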
-
-    # PREVIEW TAB
-    #Initialize plot
-    preview_subp = subplots.make_subplots(rows=4, cols=1, shared_xaxes=True, horizontal_spacing=0.01, vertical_spacing=0.01, row_heights=[3,1,1,1])
-    preview_fig = go.FigureWidget(preview_subp)
-
-    def update_preview_fig(hv_data, preview_fig):
-        preview_fig.data = []
-        
-        hvsr_data = hv_data
-        if isinstance(hvsr_data, sprit_hvsr.HVSRBatch):
-            hvsr_data=hvsr_data[0]
-
-        stream_z = hvsr_data['stream'].select(component='Z') #may be np.ma.masked_array
-        stream_e = hvsr_data['stream'].select(component='E') #may be np.ma.masked_array
-        stream_n = hvsr_data['stream'].select(component='N') #may be np.ma.masked_array
-
-        # Get iso_times and datetime.datetime
-        utcdt = stream_z[0].times(type='utcdatetime')
-        iso_times=[]
-        dt_times = []
-        for t in utcdt:
-            if t is not np.ma.masked:
-                iso_times.append(t.isoformat())
-                dt_times.append(datetime.datetime.fromisoformat(t.isoformat()))
-            else:
-                iso_times.append(np.nan)
-        iso_times = np.array(iso_times)
-        dt_times = np.array(dt_times)  # masked samples are skipped here; only dt_times[0] is used below
-
-        # Generate spectrogram
-        f, t, Sxx = signal.spectrogram(x=stream_z[0].data, fs=stream_z[0].stats.sampling_rate, mode='magnitude')
-        
-        # Get times for the axis (one time per window)
-        axisTimes = []
-        for tpass in t:
-            axisTimes.append((dt_times[0]+datetime.timedelta(seconds=tpass)).isoformat())
-
-        # Add data to preview_fig
-        # Add spectrogram of Z component
-        minz = np.percentile(Sxx, 1)
-        maxz = np.percentile(Sxx, 99)
-        hmap = go.Heatmap(z=Sxx,
-                    x=axisTimes,
-                    y=f,
-                    colorscale='Turbo',
-                    showlegend=False,
-                    hovertemplate='Time [UTC]: %{x}<br>Frequency [Hz]: %{y:.2f}<br>Spectrogram Magnitude: %{z:.2f}<extra></extra>',
-                    zmin=minz, zmax=maxz, showscale=False, name='Z Component Spectrogram')
-        preview_fig.add_trace(hmap, row=1, col=1)
-        preview_fig.update_yaxes(type='log', range=[np.log10(hvsr_data['hvsr_band'][0]), np.log10(hvsr_data['hvsr_band'][1])], row=1, col=1)
-        preview_fig.update_yaxes(title={'text':'Spectrogram (Z)'}, row=1, col=1)
-
-        # Add raw traces
-        dec_factor = 5  # decimate the data to speed up plotting
-        preview_fig.add_trace(go.Scatter(x=iso_times[::dec_factor], y=stream_z[0].data[::dec_factor],
-                                        line={'color':'black', 'width':0.5},marker=None, name='Z component data'), row=2, col='all')
-        preview_fig.update_yaxes(title={'text':'Z'}, row=2, col=1)
-        preview_fig.add_trace(go.Scatter(x=iso_times[::dec_factor], y=stream_e[0].data[::dec_factor],
-                                        line={'color':'blue', 'width':0.5},marker=None, name='E component data'),row=3, col='all')
-        preview_fig.update_yaxes(title={'text':'E'}, row=3, col=1)
-        preview_fig.add_trace(go.Scatter(x=iso_times[::dec_factor], y=stream_n[0].data[::dec_factor],
-                                        line={'color':'red', 'width':0.5},marker=None, name='N component data'), row=4, col='all')
-        preview_fig.update_yaxes(title={'text':'N'}, row=4, col=1)
-        
-        #preview_fig.add_trace(p)
-        preview_fig.update_layout(margin={"l":10, "r":10, "t":30, 'b':0}, showlegend=False,
-                                  title=f"{hvsr_data['site']} Data Preview")
-
-        if show_plot_check.value:
-            preview_fig.show()
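-    # For reference: signal.spectrogram returns frequencies f, segment times t (seconds
-    # from the start of the trace), and magnitudes Sxx of shape (len(f), len(t)); the
-    # loop above offsets each t from the first sample time to label the heatmap columns.
-    # A quick shape check (assuming scipy and numpy):
-    #
-    #   import numpy as np
-    #   from scipy import signal
-    #   f, t, Sxx = signal.spectrogram(np.random.randn(10_000), fs=100.0, mode='magnitude')
-    #   assert Sxx.shape == (len(f), len(t))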
-
-    # REMOVE NOISE SUBTAB
-    # STA/LTA Antitrigger
-    stalta_check = widgets.Checkbox(value=False, disabled=False, indent=False, description='STA/LTA Antitrigger')
-    sta = widgets.FloatText(description='STA [s]',  style={'description_width': 'initial'}, placeholder=5, value=5,layout=widgets.Layout(height='auto', width='auto'))
-    lta = widgets.FloatText(description='LTA [s]',  style={'description_width': 'initial'}, placeholder=30, value=30,layout=widgets.Layout(height='auto', width='auto'))
-    stalta_thresh_low = widgets.FloatText(description='STA/LTA Thresholds (low, high)',  style={'description_width': 'initial'}, placeholder=0.5, value=0.5,layout=widgets.Layout(height='auto', width='auto'))
-    stalta_thresh_hi = widgets.FloatText(style={'description_width': 'initial'}, placeholder=5, value=5,layout=widgets.Layout(height='auto', width='auto'))
-
-    # Percentage saturation threshold
-    max_saturation_check = widgets.Checkbox(description='Percentage Threshold (Instantaneous)', value=False, disabled=False, indent=False)
-    max_saturation_pct = widgets.FloatText(description='Max Saturation %:',  style={'description_width': 'initial'}, placeholder=0.995, value=0.995,layout=widgets.Layout(height='auto', width='auto'))
-
-    #Noise Windows
-    noisy_windows_check = widgets.Checkbox(description='Noisy Windows', value=False, disabled=False, indent=False)
-    max_window_pct = widgets.FloatText(description='Max Window %:',  style={'description_width': 'initial'}, placeholder=0.8, value=0.8,layout=widgets.Layout(height='auto', width='auto'))
-    noisy_window_length = widgets.FloatText(description='Window Length [s]:',  style={'description_width': 'initial'}, placeholder=30, value=30,layout=widgets.Layout(height='auto', width='auto'))
-
-    #Warmup/cooldown
-    warmcool_check = widgets.Checkbox(description='Warmup & Cooldown Time', value=False, disabled=False, indent=False)
-    warmup_time = widgets.FloatText(description='Warmup time [s]:',  style={'description_width': 'initial'}, placeholder=0, value=0,layout=widgets.Layout(height='auto', width='auto'))
-    cooldown_time = widgets.FloatText(description='Cooldown time [s]:',  style={'description_width': 'initial'}, placeholder=0, value=0,layout=widgets.Layout(height='auto', width='auto'))
-
-    #STD Ratio
-    std_ratio_check = widgets.Checkbox(description='Standard Deviation Antitrigger (not yet implemented)', value=False, disabled=True, indent=False)
-    std_ratio_text = widgets.FloatText(description='StdDev Ratio:',  style={'description_width': 'initial'}, placeholder=0, value=0,layout=widgets.Layout(height='auto', width='auto'), disabled=True)
-    std_window_length_text = widgets.FloatText(description='Moving window Length [s]:',  style={'description_width': 'initial'}, placeholder=0, value=0,layout=widgets.Layout(height='auto', width='auto'),disabled=True)
-
-    #Autoremove
-    auto_remove_check = widgets.Checkbox(description='Use Auto Remove', value=False, disabled=False, indent=False)
-
-    #Remove from raw data
-    raw_data_remove_check = widgets.Checkbox(description='Remove Noise From Raw Data', value=False, disabled=False, indent=False)
-
-    #remove_noise call
-    remove_noise_prefix = widgets.HTML(value='<style>p {word-wrap: break-word}</style> <p>' + 'remove_noise' + '</p>', 
-                                       layout=widgets.Layout(width='fill', justify_content='flex-end',align_content='flex-start'))
-    remove_noise_call = widgets.HTML(value='()')
-    remove_noise_call_hbox = widgets.HBox([remove_noise_prefix, remove_noise_call])
-
-    # Update remove_noise call
-    def update_remove_noise_call():
-        rnkwargs = get_remove_noise_kwargs()
-        rn_text = f"""(hvsr_data=hvsr_data, remove_method={rnkwargs['remove_method']}, 
-                    sat_percent={rnkwargs['sat_percent']}, 
-                    noise_percent={rnkwargs['noise_percent']}, 
-                    sta={rnkwargs['sta']}, 
-                    lta={rnkwargs['lta']}, 
-                    stalta_thresh={rnkwargs['stalta_thresh']}, 
-                    warmup_time={rnkwargs['warmup_time']}, 
-                    cooldown_time={rnkwargs['cooldown_time']}, 
-                    min_win_size={rnkwargs['min_win_size']}, 
-                    remove_raw_noise={rnkwargs['remove_raw_noise']}, 
-                    verbose={verbose_check.value})"""
-        remove_noise_call.value='<style>p {word-wrap: break-word}</style> <p>' + rn_text + '</p>'
-    update_remove_noise_call()
-
-    #Update noise windows
-    update_noise_windows_button = widgets.Button(description='Update Noise Windows',button_style='info',layout=widgets.Layout(height='auto', width='auto'), disabled=True)
-
-    preview_graph_widget = widgets.Output()
-    #progress bar (same as above)
-    preview_progress_hbox = widgets.HBox(children=[progress_bar, update_noise_windows_button, process_hvsr_button])
-
-    # Add it all in to the tab
-    stalta_hbox = widgets.HBox([stalta_check, sta, lta, stalta_thresh_low, stalta_thresh_hi])
-    sat_hbox = widgets.HBox([max_saturation_check, max_saturation_pct])
-    noise_win_hbox = widgets.HBox([noisy_windows_check, max_window_pct, noisy_window_length])
-    warmcool_hbox = widgets.HBox([warmcool_check, warmup_time, cooldown_time])
-    std_ratio_hbox = widgets.HBox([std_ratio_check, std_ratio_text, std_window_length_text])
-    spacer_hbox = widgets.HBox([tenpct_spacer])
-
-    preview_noise_tab = widgets.VBox([stalta_hbox,
-                                      sat_hbox,
-                                      noise_win_hbox,
-                                      warmcool_hbox,
-                                      std_ratio_hbox,
-                                      auto_remove_check,
-                                      raw_data_remove_check,
-                                      spacer_hbox,
-                                      remove_noise_call_hbox])
-
-    preview_graph_tab = widgets.VBox(children=[preview_graph_widget])
-    preview_subtabs = widgets.Tab([preview_graph_tab, preview_noise_tab])
-    preview_tab = widgets.VBox()
-
-    preview_subtabs.set_title(0, "Data Preview")
-    preview_subtabs.set_title(1, "Noise Removal")
-
-    preview_tab.children = [preview_subtabs, preview_progress_hbox]
-    # Initialize tab
-    with preview_graph_widget:
-        display(preview_fig)
-
-    # SETTINGS TAB
-    plot_settings_tab = widgets.GridspecLayout(18, ui_width)
-    settings_progress_hbox = widgets.HBox(children=[progress_bar, tenpct_spacer, process_hvsr_button])
-
-    # PPSD SETTINGS SUBTAB
-    ppsd_length_label = widgets.Label(value='Window Length for PPSDs:')
-    ppsd_length = widgets.FloatText(style={'description_width': 'initial'}, 
-                                    placeholder=20, value=20,layout=widgets.Layout(height='auto', width='auto'), disabled=False)
-    
-    overlap_pct_label = widgets.Label(value='Overlap %:')
-    overlap_pct = widgets.FloatText(style={'description_width': 'initial'}, 
-                                    placeholder=0.5, value=0.5, layout=widgets.Layout(height='auto', width='auto'), disabled=False)
-
-    period_step_label = widgets.Label(value='Period Step Octaves:')
-    period_step_octave = widgets.FloatText(style={'description_width': 'initial'}, 
-                                           placeholder=0.0625, value=0.0625, layout=widgets.Layout(height='auto', width='auto'), disabled=False)
-
-    skip_on_gaps_label = widgets.Label(value='Skip on gaps:')
-    skip_on_gaps = widgets.Checkbox(value=False, disabled=False, indent=False)
-
-    db_step_label = widgets.Label(value='dB bins:')
-    db_bins_min = widgets.FloatText(description='Min. dB', style={'description_width': 'initial'},
-                                    placeholder=-200, value=-200, layout=widgets.Layout(height='auto', width='auto'), disabled=False)
-    db_bins_max = widgets.FloatText(description='Max. dB', style={'description_width': 'initial'},
-                                    placeholder=-50, value=-50, layout=widgets.Layout(height='auto', width='auto'), disabled=False)
-    db_bins_step = widgets.FloatText(description='dB Step', style={'description_width': 'initial'},
-                                    placeholder=1, value=1, layout=widgets.Layout(height='auto', width='auto'), disabled=False)
-    
-    period_limit_label = widgets.Label(value='Period Limits:')
-    minPLim = round(1/(hvsr_band_max_box.value), 3)
-    maxPLim = round(1/(hvsr_band_min_box.value), 3)
-    period_limits_min = widgets.FloatText(description='Min. Period Limit', style={'description_width': 'initial'},
-                                    placeholder=minPLim, value=minPLim, layout=widgets.Layout(height='auto', width='auto'), disabled=False)
-    period_limits_max = widgets.FloatText(description='Max. Period Limit', style={'description_width': 'initial'},
-                                    placeholder=maxPLim, value=maxPLim, layout=widgets.Layout(height='auto', width='auto'), disabled=False)
-    period_smoothing_width = widgets.FloatText(description='Period Smoothing Width', style={'description_width': 'initial'},
-                                    placeholder=1, value=1, layout=widgets.Layout(height='auto', width='auto'), disabled=False)
-
-    special_handling_dropdown = widgets.Dropdown(description='Special Handling', value='none',
-                                                options=[('None', 'none'), ('Ringlaser', 'ringlaser'), ('Hydrophone', 'hydrophone')],
-                                            style={'description_width': 'initial'},  layout=widgets.Layout(height='auto', width='auto'), disabled=False)
-
-    # generate_ppsds call
-    generate_ppsd_prefix = widgets.HTML(value='<style>p {word-wrap: break-word}</style> <p>' + 'generate_ppsds' + '</p>', 
-                                       layout=widgets.Layout(width='fill', justify_content='flex-end',align_content='flex-start'))
-    generate_ppsd_call = widgets.HTML(value='()')
-    generate_ppsd_call_hbox = widgets.HBox([generate_ppsd_prefix, generate_ppsd_call])
-
-    # Update generate_ppsds() call
-    def update_generate_ppsd_call():
-        gppsdkwargs = get_generate_ppsd_kwargs()
-        gppsd_text = f"""(hvsr_data=hvsr_data, 
-                        stats=hvsr_data['stream'].select(component='*').traces[0].stats, 
-                        metadata=hvsr_data['paz']['*'], 
-                        skip_on_gaps={gppsdkwargs['skip_on_gaps']}, 
-                        db_bins={gppsdkwargs['db_bins']}, 
-                        ppsd_length={gppsdkwargs['ppsd_length']}, 
-                        overlap={gppsdkwargs['overlap']}, 
-                        special_handling={gppsdkwargs['special_handling']}, 
-                        period_smoothing_width_octaves={gppsdkwargs['period_smoothing_width_octaves']}, 
-                        period_step_octaves={gppsdkwargs['period_step_octaves']}, 
-                        period_limits={gppsdkwargs['period_limits']}, 
-                        verbose={verbose_check.value})"""
-        generate_ppsd_call.value='<style>p {word-wrap: break-word}</style> <p>' + gppsd_text + '</p>'
-    update_generate_ppsd_call()
-
-    ppsd_length_hbox = widgets.HBox([ppsd_length_label, ppsd_length])
-    overlap_pct_hbox = widgets.HBox([overlap_pct_label, overlap_pct])
-    pstep_hbox = widgets.HBox([period_step_label, period_step_octave])
-    skipgaps_hbox = widgets.HBox([skip_on_gaps_label, skip_on_gaps])
-    db_bins_hbox = widgets.HBox([db_step_label, db_bins_min, db_bins_max, db_bins_step])
-    plim_hbox = widgets.HBox([period_limit_label, period_limits_min, period_limits_max, period_smoothing_width])
-
-    ppsd_settings_tab = widgets.VBox([ppsd_length_hbox,
-                                      overlap_pct_hbox,
-                                      pstep_hbox,
-                                      skipgaps_hbox,
-                                      db_bins_hbox,
-                                      plim_hbox,
-                                      special_handling_dropdown,
-                                      generate_ppsd_call_hbox])
-
-    # OUTLIER SETTINGS SUBTAB
-    rmse_pctile_check = widgets.Checkbox(description='Using percentile', layout=widgets.Layout(height='auto', width='auto'), style={'description_width': 'initial'}, value=True)
-    rmse_thresh = widgets.FloatText(description='RMSE Threshold', style={'description_width': 'initial'},
-                                    placeholder=98, value=98, layout=widgets.Layout(height='auto', width='auto'), disabled=False)
-    use_hv_curve_rmse = widgets.Checkbox(description='Use HV Curve Outliers (may only be used after they have been calculated during the process_hvsr() step)', layout=widgets.Layout(height='auto', width='auto'), style={'description_width': 'initial'}, value=False, disabled=True)
-
-    outlier_threshbox_hbox = widgets.HBox(children=[rmse_thresh, rmse_pctile_check])
-    outlier_params_vbox = widgets.VBox(children=[outlier_threshbox_hbox, use_hv_curve_rmse])
-
-    global outlier_fig
-    outlier_fig = go.FigureWidget()
-    outlier_graph_widget = widgets.Output()
-
-    outlier_thresh_slider_label = widgets.Label(value='RMSE Thresholds:')
-    rmse_thresh_slider = widgets.FloatSlider(value=0, min=0, max=100, step=0.1,description='RMSE Value',layout=widgets.Layout(height='auto', width='auto'),disabled=True)
-    rmse_pctile_slider = widgets.FloatSlider(value=get_default(sprit_hvsr.remove_outlier_curves, 'rmse_thresh'), min=0, max=100, step=0.1, description="Percentile",layout=widgets.Layout(height='auto', width='auto'),)
-    
-    def calc_rmse(array_2d):
-        medArray = np.nanmedian(array_2d, axis=0)
-        rmse = np.sqrt(((np.subtract(array_2d, medArray)**2).sum(axis=1))/array_2d.shape[1])
-        return rmse
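-    # For reference, calc_rmse scores each window's curve by its RMSE from the
-    # column-wise median curve. A worked example (assuming only numpy):
-    #
-    #   arr = np.array([[1., 2.], [1., 2.], [9., 10.]])
-    #   med = np.nanmedian(arr, axis=0)                          # -> [1., 2.]
-    #   np.sqrt(((arr - med) ** 2).sum(axis=1) / arr.shape[1])   # -> [0., 0., 8.]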
-    
-    def on_update_rmse_thresh_slider(change):
-        if use_hv_curve_rmse.value:
-            rmse = calc_rmse(np.stack(hvsr_data.hvsr_df['HV_Curves']))
-        else:
-            rmsez = calc_rmse(np.stack(hvsr_data.hvsr_df['psd_values_Z']))
-            rmsee = calc_rmse(np.stack(hvsr_data.hvsr_df['psd_values_E']))
-            rmsen = calc_rmse(np.stack(hvsr_data.hvsr_df['psd_values_N']))
-
-            rmse = np.stack([rmsez, rmsee, rmsen]).flatten()
-
-        if rmse_pctile_check.value:
-            rmse_thresh.value = rmse_pctile_slider.value
-        else:
-            rmse_thresh.value = rmse_thresh_slider.value
-            rmse_pctile_slider.value = ((rmse < rmse_thresh_slider.value).sum() / len(rmse)) * 100
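-    # For reference: the last line above converts an absolute RMSE threshold back into
-    # its empirical percentile, the inverse of np.percentile. Sanity check (numpy only):
-    #
-    #   rmse = np.array([1., 2., 3., 4.])
-    #   thresh = np.percentile(rmse, 75)              # -> 3.25
-    #   (rmse < thresh).sum() / len(rmse) * 100       # -> 75.0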
-
-    def on_update_rmse_pctile_slider(change):
-        if use_hv_curve_rmse.value:
-            rmse = calc_rmse(np.stack(hvsr_data.hvsr_df['HV_Curves']))
-        else:
-            rmsez = calc_rmse(np.stack(hvsr_data.hvsr_df['psd_values_Z']))
-            rmsee = calc_rmse(np.stack(hvsr_data.hvsr_df['psd_values_E']))
-            rmsen = calc_rmse(np.stack(hvsr_data.hvsr_df['psd_values_N']))
-
-            rmse = np.stack([rmsez, rmsee, rmsen])
-
-        if rmse_pctile_check.value:
-            rmse_thresh_slider.value = np.percentile(rmse, rmse_pctile_slider.value)
-            rmse_thresh.value = rmse_pctile_slider.value
-        else:
-            rmse_thresh.value = rmse_thresh_slider.value
-
-    def on_update_rmse_pctile_check(change):
-        if rmse_pctile_check.value:
-            rmse_pctile_slider.disabled = False
-            rmse_thresh_slider.disabled = True
-        else:
-            rmse_pctile_slider.disabled = True
-            rmse_thresh_slider.disabled = False
-    
-    def on_update_rmse_thresh(change):
-        if rmse_pctile_check.value:
-            rmse_pctile_slider.value = rmse_thresh.value
-        else:
-            rmse_thresh_slider.value = rmse_thresh.value
-
-    rmse_pctile_check.observe(on_update_rmse_pctile_check)
-    rmse_thresh_slider.observe(on_update_rmse_thresh_slider)
-    rmse_pctile_slider.observe(on_update_rmse_pctile_slider)
-    rmse_thresh.observe(on_update_rmse_thresh)
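-    # Note: .observe() registered without a `names` argument fires on every trait
-    # change of the widget, not only its value; the handlers above therefore read
-    # current widget state rather than the `change` payload. A narrower registration
-    # that only reacts to value changes would be, e.g.:
-    #
-    #   rmse_thresh.observe(on_update_rmse_thresh, names='value')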
-
-    use_hv_curve_label = widgets.Label(value='NOTE: Outlier curves may only be identified after PPSDs have been calculated (during the generate_ppsds() step)', layout=widgets.Layout(height='auto', width='80%'))
-    generate_ppsd_button = widgets.Button(description='Generate PPSDs', layout=widgets.Layout(height='auto', width='20%', justify_content='flex-end'), disabled=False)
-    update_outlier_plot_button = widgets.Button(description='Remove Outliers', layout=widgets.Layout(height='auto', width='20%', justify_content='flex-end'), disabled=False)
-    outlier_ppsd_hbox = widgets.HBox([use_hv_curve_label, generate_ppsd_button, update_outlier_plot_button])
-    remove_outlier_curve_prefix = widgets.HTML(value='<style>p {word-wrap: break-word}</style> <p>' + 'remove_outlier_curves' + '</p>', 
-                                       layout=widgets.Layout(width='fill', justify_content='flex-end',align_content='flex-start'))
-    remove_outlier_curve_call = widgets.HTML(value='()')
-    remove_outlier_hbox = widgets.HBox([remove_outlier_curve_prefix, remove_outlier_curve_call])
-
-    # Update remove_outlier call
-    def update_remove_outlier_curve_call():
-        roc_text = f"""(hvsr_data=hvsr_data, rmse_thresh={rmse_thresh.value}, use_percentile={rmse_pctile_check.value},
-                            use_hv_curve={use_hv_curve_rmse.value}...verbose={verbose_check.value})"""
-        remove_outlier_curve_call.value='<style>p {word-wrap: break-word}</style> <p>' + roc_text + '</p>'
-    update_remove_outlier_curve_call()
-
-    def update_outlier_fig_button(button):
-        outlier_fig, hvsr_data = update_outlier_fig(button)
-
-    generate_ppsd_button.on_click(process_data)
-
-    update_outlier_plot_button.on_click(update_outlier_fig_button)
-
-    outlier_settings_tab = widgets.VBox(children=[outlier_params_vbox,
-                                                  outlier_graph_widget,
-                                                  outlier_thresh_slider_label,
-                                                  rmse_thresh_slider,
-                                                  rmse_pctile_slider,
-                                                  outlier_ppsd_hbox,
-                                                  remove_outlier_hbox])
-
-    with outlier_graph_widget:
-        display(outlier_fig)
-
-    def update_outlier_fig(input=None, _rmse_thresh=rmse_pctile_slider.value, _use_percentile=True, _use_hv_curve=use_hv_curve_rmse.value, _verbose=verbose_check.value):
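-        # NOTE: the underscore defaults above are captured once at definition time;
-        # the body reads the live widget values directly instead.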
-        global outlier_fig
-        global hvsr_data
-        hv_data = hvsr_data
-
-        roc_kwargs = {'rmse_thresh':rmse_pctile_slider.value,
-                        'use_percentile':True,
-                        'use_hv_curve':use_hv_curve_rmse.value,
-                        'show_outlier_plot':False,
-                        'verbose':verbose_check.value
-                      }
-        if 'PPSDStatus' in hvsr_data.ProcessingStatus.keys() and hvsr_data.ProcessingStatus['PPSDStatus']:
-            log_textArea.value += f"\n\n{datetime.datetime.now()}\nremove_outlier_curves():\n'{roc_kwargs}"    
-            hvsr_data = sprit_hvsr.remove_outlier_curves(hvsr_data, **roc_kwargs)
-        else:
-            log_textArea.value += f"\n\n{datetime.datetime.now()}\nremove_outlier_curves() attempted, but not completed. hvsr_data.ProcessingStatus['PPSDStatus']=False\n'{roc_kwargs}"
-            return outlier_fig, hvsr_data
-
-        if roc_kwargs['use_hv_curve']:
-            no_subplots = 1
-            if hasattr(hvsr_data, 'hvsr_df') and 'HV_Curves' in hvsr_data.hvsr_df.columns:
-                outlier_fig.data = []
-                # Rebuild outlier_fig from a fresh subplot grid
-                outlier_subp = subplots.make_subplots(rows=no_subplots, cols=1, horizontal_spacing=0.01, vertical_spacing=0.1)
-                outlier_fig = go.FigureWidget(outlier_subp)
-
-                # x_freqs is a dict keyed by component; H/V curves are one sample
-                # shorter than the frequency axis, hence the [1:] slice
-                x_data = hvsr_data['x_freqs']['Z'][1:]
-                curve_traces = []
-                # a pandas Series has no .iterrows(); iterate (index, value) pairs instead
-                for hv in hvsr_data.hvsr_df['HV_Curves'].items():
-                    curve_traces.append(go.Scatter(x=x_data, y=hv[1]))
-                outlier_fig.add_traces(curve_traces)
-                
-                # Calculate the median curve across all windows
-                medCurve = np.nanmedian(np.stack(hvsr_data.hvsr_df['HV_Curves']), axis=0)
-                outlier_fig.add_trace(go.Scatter(x=x_data, y=medCurve, line=dict(color='rgba(0,0,0,1)', width=1.5),showlegend=False))
-                
-                minY = np.nanmin(np.stack(hvsr_data.hvsr_df['HV_Curves']))
-                maxY = np.nanmax(np.stack(hvsr_data.hvsr_df['HV_Curves']))
-                totalWindows = hvsr_data.hvsr_df.shape[0]
-                #medCurveArr = np.tile(medCurve, (curr_data.shape[0], 1))
-
-        else:
-            no_subplots = 3
-            outlier_fig.data = []
-            # Rebuild outlier_fig from a fresh 3-row subplot grid (one row per component)
-            outlier_subp = subplots.make_subplots(rows=no_subplots, cols=1, horizontal_spacing=0.01, vertical_spacing=0.02,
-                                                    row_heights=[1, 1, 1])
-            outlier_fig = go.FigureWidget(outlier_subp)
-
-            if hasattr(hvsr_data, 'hvsr_df'):
-                rowDict = {'Z':1, 'E':2, 'N':3}
-                showTLabelsDict={'Z':False, 'E':False, 'N':True}
-                def comp_rgba(comp, a):
-                    compstr = ''
-                    if comp=='Z':
-                        compstr = f'rgba(0, 0, 0, {a})'
-                    if comp=='E':
-                        compstr = f'rgba(50, 50, 250, {a})'
-                    if comp=='N':
-                        compstr = f'rgba(250, 50, 50, {a})'
-                    return compstr                         
-                compNames = ['Z', 'E', 'N']
-                rmse_to_plot=[]
-                med_traces=[]
-
-                noRemoved = 0
-                indRemoved = []
-                for i, comp in enumerate(compNames):
-                    if hasattr(hvsr_data, 'x_freqs'):
-                        x_data = hvsr_data['x_freqs'][comp]
-                    else:
-                        x_data = [1/p for p in hvsr_data['ppsds'][comp]['period_xedges'][1:]]                    
-                    column = 'psd_values_'+comp
-                    # Retrieve data from dataframe (use all windows, just in case)
-                    curr_data = np.stack(hvsr_data['hvsr_df'][column])
-                    
-                    # Calculate a median curve, and reshape so same size as original
-                    medCurve = np.nanmedian(curr_data, axis=0)
-                    medCurveArr = np.tile(medCurve, (curr_data.shape[0], 1))
-                    medTrace = go.Scatter(x=x_data, y=medCurve, line=dict(color=comp_rgba(comp, 1), width=1.5), 
-                                                 name=f'{comp} Component', showlegend=True)
-                    # Calculate RMSE
-                    rmse = np.sqrt(((np.subtract(curr_data, medCurveArr)**2).sum(axis=1))/curr_data.shape[1])
-
-                    rmse_threshold = np.percentile(rmse, roc_kwargs['rmse_thresh'])
-                    
-                    # Retrieve index of those RMSE values that lie outside the threshold
-                    timeIndex = hvsr_data['hvsr_df'].index
-                    for j, curve in enumerate(curr_data):
-                        if rmse[j] > rmse_threshold:
-                            badTrace = go.Scatter(x=x_data, y=curve,
-                                                line=dict(color=comp_rgba(comp, 1), width=1.5, dash='dash'),
-                                                #marker=dict(color=comp_rgba(comp, 1), size=3),
-                                                name=str(hvsr_data.hvsr_df.index[j]), showlegend=False)
-                            outlier_fig.add_trace(badTrace, row=rowDict[comp], col=1)
-                            if j not in indRemoved:
-                                indRemoved.append(j)
-                            noRemoved += 1
-                        else:
-                            goodTrace = go.Scatter(x=x_data, y=curve,
-                                                  line=dict(color=comp_rgba(comp, 0.01)), name=str(hvsr_data.hvsr_df.index[j]), showlegend=False)
-                            outlier_fig.add_trace(goodTrace, row=rowDict[comp], col=1)
-
-                    timeIndRemoved = pd.DatetimeIndex([timeIndex[ind] for ind in indRemoved])
-                    hvsr_data['hvsr_df'].loc[timeIndRemoved, 'Use'] = False
-
-                    outlier_fig.add_trace(medTrace, row=rowDict[comp], col=1)
-                    
-                    outlier_fig.update_xaxes(showticklabels=False, row=1, col=1)
-                    outlier_fig.update_yaxes(title={'text':'Z'}, row=1, col=1)
-                    outlier_fig.update_xaxes(showticklabels=False, row=2, col=1)
-                    outlier_fig.update_yaxes(title={'text':'E'}, row=2, col=1)
-                    outlier_fig.update_xaxes(showticklabels=True, row=3, col=1)
-                    outlier_fig.update_yaxes(title={'text':'N'}, row=3, col=1)
-
-                    outlier_fig.update_layout(margin={"l":10, "r":10, "t":30, 'b':0}, showlegend=True,
-                                  title=f"{hvsr_data['site']} Outliers")
-                    if comp == 'N':
-                        minY = np.nanmin(curr_data)
-                        maxY = np.nanmax(curr_data)
-                    totalWindows = curr_data.shape[0]
-                
-                outlier_fig.add_annotation(
-                    text=f"{len(indRemoved)}/{totalWindows} outlier windows removed",
-                    x=np.log10(max(x_data)) - (np.log10(max(x_data))-np.log10(min(x_data))) * 0.01,
-                    y=minY+(maxY-minY)*0.01,
-                    xanchor="right", yanchor="bottom",#bgcolor='rgba(256,256,256,0.7)',
-                    showarrow=False,row=no_subplots, col=1)
-
-
-        outlier_fig.update_xaxes(type='log')
-        with outlier_graph_widget:
-            clear_output(wait=True)
-            display(outlier_fig)
-        
-        if show_plot_check.value:
-            outlier_fig.show()
-
-        return outlier_fig, hvsr_data
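-    # For reference: outlier windows are excluded by flipping their 'Use' flag with a
-    # DatetimeIndex .loc assignment. A minimal sketch of that pandas pattern (the frame
-    # here is illustrative):
-    #
-    #   import pandas as pd
-    #   df = pd.DataFrame({'Use': [True, True, True]},
-    #                     index=pd.date_range('2024-01-01', periods=3))
-    #   df.loc[pd.DatetimeIndex([df.index[1]]), 'Use'] = False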
-
-    # HVSR SETTINGS SUBTAB
-    h_combine_meth = widgets.Dropdown(description='Horizontal Combination Method', value=3,
-                                    options=[('1. Differential Field Assumption (not implemented)', 1), 
-                                             ('2. Arithmetic Mean | H = (N + E)/2', 2), 
-                                             ('3. Geometric Mean | H = √(N * E) (SESAME recommended)', 3),
-                                             ('4. Vector Summation | H = √(N^2 + E^2)', 4),
-                                             ('5. Quadratic Mean | H = √((N^2 + E^2)/2)', 5),
-                                             ('6. Maximum Horizontal Value | H = max(N, E)', 6)],
-                                    style={'description_width': 'initial'},  layout=widgets.Layout(height='auto', width='auto'), disabled=False)
-
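-    # For reference, the SESAME-recommended geometric-mean combination (option 3)
-    # applied to per-window horizontal spectra is simply (numpy only; values
-    # illustrative):
-    #
-    #   N = np.array([2.0, 4.0]); E = np.array([8.0, 1.0])
-    #   H = np.sqrt(N * E)   # -> [4., 2.]
-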
-    freq_smoothing = widgets.Dropdown(description='Frequency Smoothing Operations', value='konno ohmachi',
-                                    options=[('Konno-Ohmachi', 'konno ohmachi'),
-                                             ('Constant','constant'),
-                                             ('Proportional', 'proportional'),
-                                             ('None', None)],
-                                    style={'description_width': 'initial'},  layout=widgets.Layout(height='auto', width='auto'), disabled=False)
-    freq_smooth_width = widgets.FloatText(description='Freq. Smoothing Width', style={'description_width': 'initial'},
-                                    placeholder=40, value=40, layout=widgets.Layout(height='auto', width='auto'), disabled=False)
-
-    resample_hv_curve_bool = widgets.Checkbox(layout=widgets.Layout(height='auto', width='auto'), style={'description_width': 'initial'}, value=True)
-    resample_hv_curve = widgets.IntText(description='Resample H/V Curve', style={'description_width': 'initial'},
-                                    placeholder=500, value=500, layout=widgets.Layout(height='auto', width='auto'), disabled=False)
-
-    smooth_hv_curve_bool = widgets.Checkbox(layout=widgets.Layout(height='auto', width='auto'), style={'description_width': 'initial'}, value=True)
-    smooth_hv_curve = widgets.IntText(description='Smooth H/V Curve', style={'description_width': 'initial'},
-                                    placeholder=51, value=51, layout=widgets.Layout(height='auto', width='auto'), disabled=False)
-
-    hvsr_band_hbox_hvsrSet = widgets.HBox([hvsr_band_min_box, hvsr_band_max_box],layout=widgets.Layout(height='auto', width='auto'))
-
-    peak_freq_range_hbox_hvsrSet = widgets.HBox([peak_freq_range_min_box, peak_freq_range_max_box],layout=widgets.Layout(height='auto', width='auto'))
-
-    peak_selection_type = widgets.Dropdown(description='Peak Selection Method', value='max',
-                                    options=[('Highest Peak', 'max'),
-                                             ('Best Scored','scored')],
-                                    style={'description_width': 'initial'},  layout=widgets.Layout(height='auto', width='auto'), disabled=False)
-
-    process_hvsr_call_prefix = widgets.HTML(value='<style>p {word-wrap: break-word}</style> <p>' + 'process_hvsr' + '</p>', 
-                                       layout=widgets.Layout(width='fill', justify_content='flex-end', align_content='flex-start'))
-    process_hvsr_call = widgets.HTML(value='()')
-    process_hvsr_call_hbox = widgets.HBox([process_hvsr_call_prefix, process_hvsr_call])
-
-    # Update process_hvsr call
-    def update_process_hvsr_call():
-        ph_kwargs = get_process_hvsr_kwargs()
-        ph_text = f"""(hvsr_data=hvsr_data, 
-                        method={ph_kwargs['method']}, 
-                        smooth={ph_kwargs['smooth']}, 
-                        freq_smooth={ph_kwargs['freq_smooth']}, 
-                        f_smooth_width={ph_kwargs['f_smooth_width']}, 
-                        resample={ph_kwargs['resample']}, 
-                        outlier_curve_rmse_percentile={ph_kwargs['outlier_curve_rmse_percentile']}, 
-                        verbose={verbose_check.value})"""
-        process_hvsr_call.value='<style>p {word-wrap: break-word}</style> <p>' + ph_text + '</p>'
-    update_process_hvsr_call()
-
-    check_peaks_call_prefix = widgets.HTML(value='<style>p {word-wrap: break-word}</style> <p>'+'check_peaks' + '</p>',
-                                       layout=widgets.Layout(width='fill', justify_content='flex-end',align_content='flex-start'))
-    check_peaks_call = widgets.HTML(value='()')
-    check_peaks_call_hbox = widgets.HBox([check_peaks_call_prefix, check_peaks_call])
-
-    # Update check_peaks call
-    def update_check_peaks_call():
-        cp_kwargs = get_check_peaks_kwargs()
-        cp_text = f"""(hvsr_data=hvsr_data, 
-                        hvsr_band={cp_kwargs['hvsr_band']}, 
-                        peak_selection={cp_kwargs['peak_selection']}, 
-                        peak_freq_range={cp_kwargs['peak_freq_range']}, 
-                        verbose={verbose_check.value})"""
-        check_peaks_call.value='<style>p {word-wrap: break-word}</style> <p>' + cp_text + '</p>'
-    update_check_peaks_call()
-
-    freq_smooth_hbox = widgets.HBox([freq_smoothing, freq_smooth_width])
-    resample_hbox = widgets.HBox([resample_hv_curve_bool, resample_hv_curve])
-    smooth_hbox = widgets.HBox([smooth_hv_curve_bool, smooth_hv_curve])
-    
-    # Set up vbox for hvsr_settings subtab
-    hvsr_settings_tab = widgets.VBox([h_combine_meth,
-                                    freq_smooth_hbox,
-                                    resample_hbox,
-                                    smooth_hbox,
-                                    hvsr_band_hbox_hvsrSet,
-                                    peak_freq_range_hbox_hvsrSet,
-                                    peak_selection_type,
-                                    process_hvsr_call_hbox,
-                                    check_peaks_call_hbox])
-
-    # PLOT SETTINGS SUBTAB
-    hv_plot_label = widgets.Label(value='HVSR Plot', layout=widgets.Layout(height='auto', width='auto', justify_content='center'))
-    component_plot_label = widgets.Label(value='Component Plot', layout=widgets.Layout(height='auto', width='auto', justify_content='center'))
-    spec_plot_label = widgets.Label(value='Spectrogram Plot', layout=widgets.Layout(height='auto', width='auto', justify_content='center'))
-
-    use_plot_label = widgets.Label(value='Use Plot', layout=widgets.Layout(height='auto', width='auto', justify_content='flex-end', align_items='center'))
-    use_plot_hv = widgets.Checkbox(value=True, layout=widgets.Layout(height='auto', width='auto', justify_content='center', align_items='center'),
-                                   style={'description_width': 'initial'})
-    use_plot_comp = widgets.Checkbox(value=True, layout=widgets.Layout(height='auto', width='auto', justify_content='center', align_items='center'),
-                                   style={'description_width': 'initial'})
-    use_plot_spec = widgets.Checkbox(value=True, layout=widgets.Layout(height='auto', width='auto', justify_content='center', align_items='center'),
-                                   style={'description_width': 'initial'})
-
-    combine_plot_label = widgets.Label(value='Combine HV and Comp. Plot', layout=widgets.Layout(height='auto', width='auto', justify_content='flex-end', align_items='center'))
-    combine_hv_comp = widgets.Checkbox(value=False, layout=widgets.Layout(height='auto', width='auto', justify_content='center', align_items='center'),
-                                   style={'description_width': 'initial'})
-
-    show_peak_label = widgets.Label(value='Show Best Peak', layout=widgets.Layout(height='auto', width='auto', justify_content='flex-end', align_items='center'))
-    show_best_peak_hv = widgets.Checkbox(value=True, layout=widgets.Layout(height='auto', width='auto', justify_content='center', align_items='center'),
-                                   style={'description_width': 'initial'})
-    show_best_peak_comp = widgets.Checkbox(value=True, layout=widgets.Layout(height='auto', width='auto', justify_content='center', align_items='center'),
-                                   style={'description_width': 'initial'})
-    show_best_peak_spec = widgets.Checkbox(value=False, layout=widgets.Layout(height='auto', width='auto', justify_content='center', align_items='center'),
-                                   style={'description_width': 'initial'})
-
-    annotate_peak_label = widgets.Label(value='Annotate Best Peak', layout=widgets.Layout(height='auto', width='auto', justify_content='flex-end', align_items='center'))
-    ann_best_peak_hv = widgets.Checkbox(value=True, layout=widgets.Layout(height='auto', width='auto', justify_content='center', align_items='center'),
-                                   style={'description_width': 'initial'})
-    ann_best_peak_comp = widgets.Checkbox(value=False, layout=widgets.Layout(height='auto', width='auto', justify_content='center', align_items='center'),
-                                   style={'description_width': 'initial'})
-    ann_best_peak_spec = widgets.Checkbox(value=True, layout=widgets.Layout(height='auto', width='auto', justify_content='center', align_items='center'),
-                                   style={'description_width': 'initial'})
-
-    show_all_peaks_label = widgets.Label(value='Show All Peaks', layout=widgets.Layout(height='auto', width='auto', justify_content='flex-end', align_items='center'))
-    show_all_peaks_hv = widgets.Checkbox(value=False, layout=widgets.Layout(height='auto', width='auto', justify_content='center', align_items='center'),
-                                   style={'description_width': 'initial'})
-
-    show_all_curves_label = widgets.Label(value='Show All Curves', layout=widgets.Layout(height='auto', width='auto', justify_content='flex-end', align_items='center'))
-    show_all_curves_hv = widgets.Checkbox(value=False, layout=widgets.Layout(height='auto', width='auto', justify_content='center', align_items='center'),
-                                   style={'description_width': 'initial'})
-    show_all_curves_comp = widgets.Checkbox(value=False, layout=widgets.Layout(height='auto', width='auto', justify_content='center', align_items='center'),
-                                   style={'description_width': 'initial'})
-
-    show_ind_peaks_label = widgets.Label(value='Show Individual Peaks', layout=widgets.Layout(height='auto', width='auto', justify_content='flex-end', align_items='center'))
-    show_ind_peaks_hv = widgets.Checkbox(value=False, layout=widgets.Layout(height='auto', width='auto', justify_content='center', align_items='center'),
-                                   style={'description_width': 'initial'})
-    show_ind_peaks_spec = widgets.Checkbox(value=False, layout=widgets.Layout(height='auto', width='auto', justify_content='center', align_items='center'),
-                                       style={'description_width': 'initial'})
-
-    show_std_label = widgets.Label(value='Show Standard Deviation', layout=widgets.Layout(height='auto', width='auto', justify_content='flex-end', align_items='center'))
-    show_std_hv = widgets.Checkbox(value=True, layout=widgets.Layout(height='auto', width='auto', justify_content='center', align_items='center'),
-                                   style={'description_width': 'initial'})
-    show_std_comp = widgets.Checkbox(value=True, layout=widgets.Layout(height='auto', width='auto', justify_content='center', align_items='center'),
-                                   style={'description_width': 'initial'})
-
-    show_legend_label = widgets.Label(value='Show Legend', layout=widgets.Layout(height='auto', width='auto', justify_content='flex-end', align_items='center'))
-    show_legend_hv = widgets.Checkbox(value=False, layout=widgets.Layout(height='auto', width='auto', justify_content='center', align_items='center'),
-                                   style={'description_width': 'initial'})
-    show_legend_comp = widgets.Checkbox(value=False, layout=widgets.Layout(height='auto', width='auto', justify_content='center', align_items='center'),
-                                   style={'description_width': 'initial'})
-    show_legend_spec = widgets.Checkbox(value=False, layout=widgets.Layout(height='auto', width='auto', justify_content='center', align_items='center'),
-                                   style={'description_width': 'initial'})
-
-    x_type_label = widgets.Label(value='X Type', layout=widgets.Layout(height='auto', width='auto', justify_content='flex-end', align_items='center'))
-    x_type = widgets.Dropdown(options=[('Frequency', 'freq'), ('Period', 'period')],
-                              layout=widgets.Layout(height='auto', width='auto'), style={'description_width': 'initial'})
-
-    plotly_kwargs_label = widgets.Label(value='Plotly Kwargs', layout=widgets.Layout(height='auto', width='auto', justify_content='flex-end', align_items='center'))
-    plotly_kwargs = widgets.Text(style={'description_width': 'initial'},
-                                layout=widgets.Layout(height='auto', width='auto'), disabled=False)
-
-    mpl_kwargs_label = widgets.Label(value='Matplotlib Kwargs', layout=widgets.Layout(height='auto', width='auto', justify_content='flex-end', align_items='center'))
-    mpl_kwargs = widgets.Text(style={'description_width': 'initial'},
-                                layout=widgets.Layout(height='auto', width='auto'), disabled=False)
-
-    plot_hvsr_call = widgets.Label(value=f"Plot String: '{get_default(sprit_hvsr.get_report, 'plot_type')}'")
-    def update_plot_string():
-        plot_hvsr_text = f"""Plot String: {get_get_report_kwargs()['plot_type']}"""
-        plot_hvsr_call.value = plot_hvsr_text
-    update_plot_string()
-
-    update_plot_button = widgets.Button(description='Update Plot',button_style='info',layout=widgets.Layout(height='auto', width='auto'))
-    def manually_update_results_fig(change):
-        plot_string = get_get_report_kwargs()['plot_type']
-        update_results_fig(hvsr_results, plot_string)
-        sprit_tabs.selected_index = 4
-
-    # Set up grid for plot_settings subtab
-    plot_settings_tab[0, 5:10]   = hv_plot_label
-    plot_settings_tab[0, 10:15]  = component_plot_label
-    plot_settings_tab[0, 15:] = spec_plot_label
-
-    plot_settings_tab[1, :] = widgets.HTML('<hr>', layout=widgets.Layout(height='auto', width='auto', justify_content='center', align_items='center'))
-
-    plot_settings_tab[2, :5] = use_plot_label
-    plot_settings_tab[2, 5:10] = use_plot_hv
-    plot_settings_tab[2, 10:15] = use_plot_comp
-    plot_settings_tab[2, 15:] = use_plot_spec
-
-    plot_settings_tab[3, :] = widgets.HTML('<hr>', layout=widgets.Layout(height='auto', width='auto', justify_content='center', align_items='center'))
-
-    plot_settings_tab[4, :5] = comibne_plot_label
-    plot_settings_tab[4, 10:15] = combine_hv_comp
-
-    plot_settings_tab[5, :5] = show_peak_label
-    plot_settings_tab[5, 5:10] = show_best_peak_hv
-    plot_settings_tab[5, 10:15] = show_best_peak_comp
-    plot_settings_tab[5, 15:] = show_best_peak_spec
-
-    plot_settings_tab[6, :5] = annotate_peak_label
-    plot_settings_tab[6, 5:10] = ann_best_peak_hv
-    plot_settings_tab[6, 10:15] = ann_best_peak_comp
-    plot_settings_tab[6, 15:] = ann_best_peak_spec
-
-    plot_settings_tab[7, :5] = show_all_peaks_label
-    plot_settings_tab[7, 5:10] = show_all_peaks_hv
-
-    plot_settings_tab[8, :5] = show_all_curves_label
-    plot_settings_tab[8, 5:10] = show_all_curves_hv
-    plot_settings_tab[8, 10:15] = show_all_curves_comp
-
-    plot_settings_tab[9, :5] = show_ind_peaks_label
-    plot_settings_tab[9, 5:10] = show_ind_peaks_hv
-    plot_settings_tab[9, 15:] = show_ind_peaks_spec
-   
-    plot_settings_tab[10, :5] = show_std_label
-    plot_settings_tab[10, 5:10] = show_std_hv
-    plot_settings_tab[10, 10:15] = show_std_comp
-
-    plot_settings_tab[11, :5] = show_legend_label
-    plot_settings_tab[11, 5:10] = show_legend_hv
-    plot_settings_tab[11, 10:15] = show_legend_comp
-    plot_settings_tab[11, 15:] = show_legend_spec
-
-    plot_settings_tab[12, :] = widgets.HTML('<hr>', layout=widgets.Layout(height='auto', width='auto', justify_content='center', align_items='center'))
-
-    plot_settings_tab[13, :5] = x_type_label
-    plot_settings_tab[13, 6:] = x_type
-
-    plot_settings_tab[14, :5] = plotly_kwargs_label
-    plot_settings_tab[14, 6:] = plotly_kwargs
-
-    plot_settings_tab[15, :5] = mpl_kwargs_label
-    plot_settings_tab[15, 6:] = mpl_kwargs
-
-    plot_settings_tab[16, :] = widgets.HTML('<hr>', layout=widgets.Layout(height='auto', width='auto', justify_content='center', align_items='center'))
-
-    plot_settings_tab[17, :18] = plot_hvsr_call
-    plot_settings_tab[17, 18:] = update_plot_button
-    update_plot_button.on_click(manually_update_results_fig)
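-
-    # For reference, a minimal sketch of the label-plus-checkbox grid pattern
-    # used throughout this subtab (widget names here are hypothetical, not
-    # from the SpRIT source):
-    #
-    #     import ipywidgets as widgets
-    #     grid = widgets.GridspecLayout(1, 20)
-    #     row_label = widgets.Label(value='Show Legend',
-    #                               layout=widgets.Layout(justify_content='flex-end'))
-    #     hv_check = widgets.Checkbox(value=False, style={'description_width': 'initial'})
-    #     grid[0, :5] = row_label    # right-aligned label in the left columns
-    #     grid[0, 5:10] = hv_check   # control in that plot type's column band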
-
-    # Place everything in Settings Tab
-    settings_subtabs = widgets.Tab([ppsd_settings_tab, hvsr_settings_tab, outlier_settings_tab, plot_settings_tab])
-    settings_tab = widgets.VBox(children=[settings_subtabs, settings_progress_hbox])
-    settings_subtabs.set_title(0, "PPSD Settings")
-    settings_subtabs.set_title(1, "HVSR Settings")
-    settings_subtabs.set_title(2, "Outlier Settings")
-    settings_subtabs.set_title(3, "Plot Settings")
-
-    # LOG TAB - not currently using
-    log_tab = widgets.VBox(children=[log_textArea])
-    #log_textArea = widgets.Textarea(value="SESSION LOG", disabled=True, layout={'height': '99%','width': '99%', 'overflow': 'scroll'})
-
-    # RESULTS TAB
-    # PLOT SUBTAB
-    global results_subp
-    results_subp = subplots.make_subplots(rows=3, cols=1, horizontal_spacing=0.01, vertical_spacing=0.01, row_heights=[2,1,1])
-    results_fig = go.FigureWidget(results_subp)
-    global results_graph_widget
-    results_graph_widget = widgets.Output()   
-
-    with results_graph_widget:
-        display(results_fig)
-
-    global printed_results_textArea
-    printed_results_textArea = widgets.Textarea(value="RESULTS", disabled=True, layout={'height': '500px','width': '99%', 'overflow': 'scroll'})
-
-    global results_table
-    initialTableCols=['SiteName', 'Acq_Date', 'Longitude', 'Latitude', 'Elevation',
-                      'PeakFrequency', 'WindowLengthFreq.', 'SignificantCycles', 'LowCurveStDevOverTime', 
-                      'PeakProminenceBelow', 'PeakProminenceAbove', 'PeakAmpClarity', 
-                      'FreqStability', 'PeakStability_FreqStD', 'PeakStability_AmpStD', 'PeakPasses']
-    results_table = widgets.HTML(value=pd.DataFrame(columns=initialTableCols).to_html())
-
-    # A text box labeled Data Filepath
-    export_results_table_filepath = widgets.Text(description='Export Filepath:',
-                                    placeholder='', value='',
-                                    style={'description_width': 'initial'},layout=widgets.Layout(width='90%'))
-
-    export_results_table_read_button = widgets.Button(description='', icon='fa-file-import',button_style='success',
-                                            layout=widgets.Layout(width='10%'))
-    export_results_table_browse_button = widgets.Button(description='Export Table',
-                                            layout=widgets.Layout(width='10%'))
-    def export_results_table(button):
-        try:
-            if button.description == 'Export Table':
-                root = tk.Tk()
-                root.wm_attributes('-topmost', True)
-                root.withdraw()
-                export_results_table_filepath.value = str(filedialog.asksaveasfilename(defaultextension='.csv', title='Save CSV Report'))
-                root.destroy()
-        except Exception as e:
-            print(e)
-            export_results_table_browse_button.disabled=True
-            export_results_table_browse_button.description='Use Text Field'
-
-        out_path = export_results_table_filepath.value
-        sprit_hvsr.get_report(hvsr_results, report_format='csv', export_path=out_path,
-                              csv_overwrite_opt='overwrite')
-
-    export_results_table_browse_button.on_click(export_results_table)
-    export_results_table_read_button.on_click(export_results_table)
-
-    results_table_export_hbox = widgets.HBox([export_results_table_filepath, export_results_table_read_button, export_results_table_browse_button])
-    results_table_vbox = widgets.VBox([results_table, results_table_export_hbox])
-    global results_tab
-    results_subtabs = widgets.Tab([results_graph_widget, printed_results_textArea, results_table_vbox])
-    results_tab = widgets.VBox(children=[results_subtabs])
-    results_subtabs.set_title(0, "Plot")
-    results_subtabs.set_title(1, "Peak Tests")
-    results_subtabs.set_title(2, "Peak Table")
-
-    widget_param_dict = {
-        'fetch_data': 
-            {'source': data_source_type,
-            'trim_dir': trim_directory,
-            'export_format': trim_export_dropdown,
-            'detrend': detrend_type_dropdown,
-            'detrend_order': detrend_order,
-            'verbose': verbose_check},
-        'remove_noise': 
-            {
-            'sat_percent': max_saturation_pct,
-            'noise_percent': max_window_pct,
-            'sta': sta,
-            'lta': lta,
-            'stalta_thresh': [stalta_thresh_low, stalta_thresh_hi],
-            'warmup_time': warmup_time,
-            'cooldown_time': cooldown_time,
-            'min_win_size': noisy_window_length,
-            'remove_raw_noise': raw_data_remove_check,
-            'verbose': verbose_check},
-        'generate_ppsds': 
-            {'verbose': verbose_check,
-             'skip_on_gaps':skip_on_gaps, 
-             'db_bins':[db_bins_min, db_bins_max, db_bins_step],
-             'ppsd_length':ppsd_length, 
-             'overlap':overlap_pct, 
-             'special_handling':special_handling_dropdown, 
-             'period_smoothing_width_octaves':period_smoothing_width, 
-             'period_step_octaves':period_step_octave, 
-             'period_limits':[hvsr_band_min_box, hvsr_band_max_box]},
-        'process_hvsr': 
-            {'method': h_combine_meth,
-            'smooth': smooth_hv_curve,
-            'freq_smooth': freq_smoothing,
-            'f_smooth_width': freq_smooth_width,
-            'resample': resample_hv_curve,
-            'verbose': verbose_check},
-        'remove_outlier_curves': 
-            {'rmse_thresh': rmse_thresh,
-            'use_percentile': rmse_pctile_check,
-            'use_hv_curve': use_hv_curve_rmse,
-            'verbose': verbose_check},
-        'check_peaks': 
-            {'hvsr_band': [hvsr_band_min_box, hvsr_band_max_box],
-            'peak_freq_range': [peak_freq_range_min_box, peak_freq_range_max_box],
-            'verbose': verbose_check},
-        'get_report': 
-            {
-            'export_path': export_results_table_filepath,
-            'verbose': verbose_check}}
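-
-    # Sketch of how a widget-to-parameter map like widget_param_dict can be
-    # consumed: read each widget's live .value, unpacking widget lists
-    # element by element (helper name is hypothetical, not in SpRIT):
-    #
-    #     def widgets_to_kwargs(widget_map):
-    #         kwargs = {}
-    #         for param_name, widget in widget_map.items():
-    #             if isinstance(widget, (list, tuple)):
-    #                 kwargs[param_name] = [w.value for w in widget]  # e.g., stalta_thresh
-    #             else:
-    #                 kwargs[param_name] = widget.value
-    #         return kwargs
-    #
-    #     # e.g., remove_noise_kwargs = widgets_to_kwargs(widget_param_dict['remove_noise'])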
-
-    # SPRIT WIDGET
-    # Add all tabs to the main Tab widget
-    global sprit_tabs
-    sprit_tabs = widgets.Tab([input_tab, preview_tab, settings_tab, log_tab, results_tab])
-    sprit_tabs.set_title(0, "Input")
-    sprit_tabs.set_title(1, "Preview")
-    sprit_tabs.set_title(2, "Settings")
-    sprit_tabs.set_title(3, "Log")
-    sprit_tabs.set_title(4, "Results")
-
-    sprit_title = widgets.Label(value='SPRIT', layout=widgets.Layout(width='150px'))
-    sprit_subtitle = widgets.Label(value='Tools for ambient seismic noise analysis using HVSR',
-                                   layout=widgets.Layout(flex='1', justify_content='flex-start', align_content='flex-end'))
-
-    # Function to open a link
-    def open_dist(button):
-        link = 'https://pypi.org/project/sprit/'
-        webbrowser.open_new_tab(link)
-
-    def open_repo(button):
-        link = 'https://github.com/RJbalikian/SPRIT-HVSR'
-        webbrowser.open_new_tab(link)
-
-    def open_docs(button):
-        link = 'https://rjbalikian.github.io/SPRIT-HVSR/main.html'
-        webbrowser.open_new_tab(link)
-
-    sourcebutton = widgets.Button(description="PyPI",
-                                layout=widgets.Layout(width='4%', justify_content='flex-end',align_content='flex-end'))
-    repobutton = widgets.Button(description="Repo",
-                                layout=widgets.Layout(width='4%', justify_content='flex-end',align_content='flex-end'))
-    docsbutton = widgets.Button(description="Docs",
-                                layout=widgets.Layout(width='8%', justify_content='flex-end',align_content='flex-end'))
-
-    # Attach the open_link function to the button's on_click event
-    sourcebutton.on_click(open_dist)
-    repobutton.on_click(open_repo)
-    docsbutton.on_click(open_docs)
-
-    titlehbox = widgets.HBox([sprit_title, sprit_subtitle, repobutton, sourcebutton, docsbutton],
-                            layout = widgets.Layout(align_content='space-between'))
-    
-    title_style = {
-        'font_family': 'Arial, sans-serif',
-        'font_size': '36px',
-        'font_weight': 'bold',
-        'color': 'black'
-    }
-
-    # Apply the style to the label
-    sprit_title.style = title_style
-
-    sprit_widget = widgets.VBox([titlehbox, sprit_tabs])
-
-    def observe_children(widget, callback):
-        if hasattr(widget, 'children'):
-            for child in widget.children:
-                child.observe(callback)
-                observe_children(child, callback)
-
-    def any_update(change):
-        update_input_param_call()
-        update_fetch_data_call()
-        update_remove_noise_call()
-        update_generate_ppsd_call()
-        update_process_hvsr_call()
-        update_remove_outlier_curve_call()
-        update_check_peaks_call()
-        update_plot_string()
-
-    observe_children(sprit_tabs, any_update)
-
-    # Display the tab
-    display(sprit_widget)
-
-def export_data(hvsr_data, export_path=None, ext='hvsr', verbose=False)
+def export_data(hvsr_data, hvsr_export_path=None, ext='hvsr', verbose=False)

Export data into pickle format that can be read back in using import_data() so data does not need to be processed each time.
@@ -3322,55 +226,11 @@

Parameters

hvsr_data : HVSRData or HVSRBatch
Data to be exported
-
export_path : str or filepath object, default = None
-
String or filepath object to be read by pathlib.Path() and/or a with open(export_path, 'wb') statement. If None, defaults to input datapath directory, by default None
+
hvsr_export_path : str or filepath object, default = None
+
String or filepath object to be read by pathlib.Path() and/or a with open(hvsr_export_path, 'wb') statement. If None, defaults to the input_data directory, by default None
ext : str, default = 'hvsr'
Filepath extension to use for data file, by default 'hvsr'
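
A usage sketch (assumes sprit is installed, hvsr_results is a processed HVSRData object, and the filename in the comment is illustrative):

    import sprit

    sprit.export_data(hvsr_results, hvsr_export_path='hvsr_output/', ext='hvsr', verbose=True)
    # Reload later without reprocessing, per the import_data() note above:
    # hvsr_results = sprit.import_data('hvsr_output/MySite_2023-02-15_pickled.hvsr')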
-
def export_data(hvsr_data, export_path=None, ext='hvsr', verbose=False):
-    """Export data into pickle format that can be read back in using import_data() so data does not need to be processed each time. 
-    Default extension is .hvsr but it is still a pickled file that can be read in using pickle.load().
-
-    Parameters
-    ----------
-    hvsr_data : HVSRData or HVSRBatch
-        Data to be exported
-    export_path : str or filepath object, default = None
-        String or filepath object to be read by pathlib.Path() and/or a with open(export_path, 'wb') statement. If None, defaults to input datapath directory, by default None
-    ext : str, default = 'hvsr'
-        Filepath extension to use for data file, by default 'hvsr'
-    """
-    def _do_export(_hvsr_data=hvsr_data, _export_path=export_path, _ext=ext):
-        
-        fname = f"{_hvsr_data.site}_{_hvsr_data.acq_date}_pickled.{ext}"
-        if _export_path is None or _export_path is True:
-            _export_path = _hvsr_data['datapath']
-            _export_path = pathlib.Path(_export_path).with_name(fname)
-        else:
-            _export_path = pathlib.Path(_export_path)
-            if _export_path.is_dir():
-                _export_path = _export_path.joinpath(fname)    
-
-        _export_path = str(_export_path)
-        with open(_export_path, 'wb') as f:
-            pickle.dump(_hvsr_data, f) 
-            
-        if verbose:
-            print(f"Processed data exported as pickled data to: {_export_path} [~{round(float(pathlib.Path(_export_path).stat().st_size)/2**20,1)} Mb]")    
-            
-    if isinstance(hvsr_data, HVSRBatch):
-        for sitename in hvsr_data.keys():
-            _do_export(hvsr_data[sitename], export_path, ext)
-    elif isinstance(hvsr_data, HVSRData):
-        _do_export(hvsr_data, export_path, ext)
-    else:
-        print("Error in data export. Data must be either of type sprit.HVSRData or sprit.HVSRBatch")         
-    return
-
def export_settings(hvsr_data, export_settings_path='default', export_settings_type='all', include_location=False, verbose=True)
@@ -3389,639 +249,57 @@

Parameters

If 'all', saves all possible types in their respective json files. If 'instrument', saves the instrument settings to their respective file. If 'processing', saves the processing settings to their respective file. By default 'all'
-
include_location : bool, default=False, input CRS
-
Whether to include the location parameters in the exported settings document. This includes xcoord, ycoord, elevation, elev_unit, and input_crs
-
verbose : bool, default=True
-
Whether to print outputs and information to the terminal
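
A usage sketch (assumes an HVSRData object named hvsr_results with populated processing parameters):

    import sprit

    sprit.export_settings(hvsr_results,
                          export_settings_path='default',   # save to the sprit package resources
                          export_settings_type='processing',
                          include_location=False,
                          verbose=True)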
-
-
def export_settings(hvsr_data, export_settings_path='default', export_settings_type='all', include_location=False, verbose=True):
-    """Save settings to json file
-
-    Parameters
-    ----------
-    export_settings_path : str, default="default"
-        Where to save the json file(s) containing the settings, by default 'default'. 
-        If "default," will save to sprit package resources. Otherwise, set a filepath location you would like for it to be saved to.
-        If 'all' is selected, a directory should be supplied. 
-        Otherwise, it will save in the directory of the provided file, if it exists. Otherwise, defaults to the home directory.
-    export_settings_type : str, {'all', 'instrument', 'processing'}
-        What kind of settings to save. 
-        If 'all', saves all possible types in their respective json files.
-        If 'instrument', saves the instrument settings to their respective file.
-        If 'processing', saves the processing settings to their respective file. By default 'all'
-    include_location : bool, default=False, input CRS
-        Whether to include the location parameters in the exported settings document. This includes xcoord, ycoord, elevation, elev_unit, and input_crs
-    verbose : bool, default=True
-        Whether to print outputs and information to the terminal
-
-    """
-    fnameDict = {}
-    fnameDict['instrument'] = "instrument_settings.json"
-    fnameDict['processing'] = "processing_settings.json"
-
-    if export_settings_path == 'default' or export_settings_path is True:
-        settingsPath = resource_dir.joinpath('settings')
-    else:
-        export_settings_path = pathlib.Path(export_settings_path)
-        if not export_settings_path.exists():
-            if not export_settings_path.parent.exists():
-                print(f'The provided value for export_settings_path ({export_settings_path}) does not exist. Saving settings to the home directory: {pathlib.Path.home()}')
-                settingsPath = pathlib.Path.home()
-            else:
-                settingsPath = export_settings_path.parent
-        
-        if export_settings_path.is_dir():
-            settingsPath = export_settings_path
-        elif export_settings_path.is_file():
-            settingsPath = export_settings_path.parent
-            fnameDict['instrument'] = export_settings_path.name+"_instrumentSettings.json"
-            fnameDict['processing'] = export_settings_path.name+"_processingSettings.json"
-
-    #Get final filepaths        
-    instSetFPath = settingsPath.joinpath(fnameDict['instrument'])
-    procSetFPath = settingsPath.joinpath(fnameDict['processing'])
-
-    #Get settings values
-    instKeys = ["instrument", "net", "sta", "loc", "cha", "depth", "metapath", "hvsr_band"]
-    inst_location_keys = ['xcoord', 'ycoord', 'elevation', 'elev_unit', 'input_crs']
-    procFuncs = [fetch_data, remove_noise, generate_ppsds, process_hvsr, check_peaks, get_report]
-
-    instrument_settings_dict = {}
-    processing_settings_dict = {}
-
-    for k in instKeys:
-        if isinstance(hvsr_data[k], pathlib.PurePath):
-            #For those that are paths and cannot be serialized
-            instrument_settings_dict[k] = hvsr_data[k].as_posix()
-        else:
-            instrument_settings_dict[k] = hvsr_data[k]
-
-    if include_location:
-        for k in inst_location_keys:
-            if isinstance(hvsr_data[k], pathlib.PurePath):
-                #For those that are paths and cannot be serialized
-                instrument_settings_dict[k] = hvsr_data[k].as_posix()
-            else:
-                instrument_settings_dict[k] = hvsr_data[k]
-
-    
-    for func in procFuncs:
-        funcName = func.__name__
-        processing_settings_dict[funcName] = {}
-        for arg in hvsr_data['processing_parameters'][funcName]:
-            if isinstance(hvsr_data['processing_parameters'][funcName][arg], (HVSRBatch, HVSRData)):
-                pass
-            else:
-                processing_settings_dict[funcName][arg] = hvsr_data['processing_parameters'][funcName][arg]
-    
-    if verbose:
-        print("Exporting Settings")
-    #Save settings files
-    if export_settings_type.lower()=='instrument' or export_settings_type.lower()=='all':
-        try:
-            with open(instSetFPath.with_suffix('.inst').as_posix(), 'w') as instSetF:
-                jsonString = json.dumps(instrument_settings_dict, indent=2)
-                #Format output for readability
-                jsonString = jsonString.replace('\n    ', ' ')
-                jsonString = jsonString.replace('[ ', '[')
-                jsonString = jsonString.replace('\n  ]', ']')
-                #Export
-                instSetF.write(jsonString)
-        except:
-            instSetFPath = pathlib.Path.home().joinpath(instSetFPath.name)
-            with open(instSetFPath.with_suffix('.inst').as_posix(), 'w') as instSetF:
-                jsonString = json.dumps(instrument_settings_dict, indent=2)
-                #Format output for readability
-                jsonString = jsonString.replace('\n    ', ' ')
-                jsonString = jsonString.replace('[ ', '[')
-                jsonString = jsonString.replace('\n  ]', ']')
-                #Export
-                instSetF.write(jsonString)
-                            
-        if verbose:
-            print(f"Instrument settings exported to {instSetFPath}")
-            print(f"{jsonString}")
-            print()
-    if export_settings_type.lower()=='processing' or export_settings_type.lower()=='all':
-        try:
-            with open(procSetFPath.with_suffix('.proc').as_posix(), 'w') as procSetF:
-                jsonString = json.dumps(processing_settings_dict, indent=2)
-                #Format output for readability
-                jsonString = jsonString.replace('\n    ', ' ')
-                jsonString = jsonString.replace('[ ', '[')
-                jsonString = jsonString.replace('\n  ]', ']')
-                jsonString = jsonString.replace('\n  },','\n\t\t},\n')
-                jsonString = jsonString.replace('{ "', '\n\t\t{\n\t\t"')
-                jsonString = jsonString.replace(', "', ',\n\t\t"')
-                jsonString = jsonString.replace('\n  }', '\n\t\t}')
-                jsonString = jsonString.replace(': {', ':\n\t\t\t{')
-                
-                #Export
-                procSetF.write(jsonString)
-        except:
-            procSetFPath = pathlib.Path.home().joinpath(procSetFPath.name)
-            with open(procSetFPath.with_suffix('.proc').as_posix(), 'w') as procSetF:
-                jsonString = json.dumps(processing_settings_dict, indent=2)
-                #Format output for readability
-                jsonString = jsonString.replace('\n    ', ' ')
-                jsonString = jsonString.replace('[ ', '[')
-                jsonString = jsonString.replace('\n  ]', ']')
-                jsonString = jsonString.replace('\n  },','\n\t\t},\n')
-                jsonString = jsonString.replace('{ "', '\n\t\t{\n\t\t"')
-                jsonString = jsonString.replace(', "', ',\n\t\t"')
-                jsonString = jsonString.replace('\n  }', '\n\t\t}')
-                jsonString = jsonString.replace(': {', ':\n\t\t\t{')
-                
-                #Export
-                procSetF.write(jsonString)            
-        if verbose:
-            print(f"Processing settings exported to {procSetFPath}")
-            print(f"{jsonString}")
-            print()
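
Despite the .inst and .proc suffixes written above, the exported files are plain JSON (the replace() calls only rearrange whitespace), so they can be read back with the standard library. A sketch using a hypothetical path:

    import json
    import pathlib

    proc_path = pathlib.Path.home().joinpath('processing_settings.proc')  # hypothetical location
    with open(proc_path) as f:
        processing_settings = json.load(f)
    print(processing_settings.get('process_hvsr', {}))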
-
-
-def fetch_data(params, source='file', trim_dir=None, export_format='mseed', detrend='spline', detrend_order=2, update_metadata=True, plot_input_stream=False, verbose=False, **kwargs)
-
-
-

Fetch ambient seismic data from a source to read into obspy stream

-

Parameters

-
-
params : dict
-
Dictionary containing all the necessary params to get data.
-
Parameters defined using input_params() function.
-
source : str, {'raw', 'dir', 'file', 'batch'}
-
String indicating where/how data file was created. For example, if raw data, will need to find correct channels.
-
'raw' finds raspberry shake data, from raw output copied using scp directly from Raspberry Shake, either in folder or subfolders;
-
'dir' is used if the day's 3 component files (currently Raspberry Shake supported only) are all 3 contained in a directory by themselves.
-
'file' is used if the params['datapath'] specified in input_params() is the direct filepath to a single file to be read directly into an obspy stream.
-
'batch' is used to read a list or specified set of seismic files.
-
Most commonly, a csv file can be read in with all the parameters. Each row in the csv is a separate file. Columns can be arranged by parameter.
-
trim_dir : None or str or pathlib obj, default=None
-
If None (or False), data is not trimmed in this function. Otherwise, this is the directory to save trimmed and exported data.
-
export_format : str='mseed'
-
If trim_dir is not None, this is the format in which to save the data
-
detrend : str or bool, default='spline'
-
If False, data is not detrended. Otherwise, this should be a string accepted by the type parameter of the obspy.core.trace.Trace.detrend method: https://docs.obspy.org/packages/autogen/obspy.core.trace.Trace.detrend.html
-
detrend_order : int, default=2
-
If detrend parameter is 'spline' or 'polynomial', this is passed directly to the order parameter of obspy.core.trace.Trace.detrend method.
-
update_metadata : bool, default=True
-
Whether to update the metadata file, used primarily with Raspberry Shake data which uses a generic inventory file.
-
plot_input_stream : bool, default=False
-
Whether to plot the raw input stream. This plot includes a spectrogram (Z component) and the raw (with decimation for speed) plots of each component signal.
-
verbose : bool, default=False
-
Whether to print outputs and inputs to the terminal
-
**kwargs
-
Keyword arguments, primarily for 'batch' and 'dir' sources
-
-

Returns

-
-
params : HVSRData or HVSRBatch object
-
Same as params parameter, but with an additional "stream" attribute with an obspy data stream with 3 traces: Z (vertical), N (North-south), and E (East-west)
-
-
def fetch_data(params, source='file', trim_dir=None, export_format='mseed', detrend='spline', detrend_order=2, update_metadata=True, plot_input_stream=False, verbose=False, **kwargs):
-    """Fetch ambient seismic data from a source to read into obspy stream
-    
-    Parameters
-    ----------
-    params  : dict
-        Dictionary containing all the necessary params to get data.
-            Parameters defined using input_params() function.
-    source  : str, {'raw', 'dir', 'file', 'batch'}
-        String indicating where/how data file was created. For example, if raw data, will need to find correct channels.
-            'raw' finds raspberry shake data, from raw output copied using scp directly from Raspberry Shake, either in folder or subfolders; 
-            'dir' is used if the day's 3 component files (currently Raspberry Shake supported only) are all 3 contained in a directory by themselves.
-            'file' is used if the params['datapath'] specified in input_params() is the direct filepath to a single file to be read directly into an obspy stream.
-            'batch' is used to read a list or specified set of seismic files. 
-                Most commonly, a csv file can be read in with all the parameters. Each row in the csv is a separate file. Columns can be arranged by parameter.
-    trim_dir : None or str or pathlib obj, default=None
-        If None (or False), data is not trimmed in this function.
-        Otherwise, this is the directory to save trimmed and exported data.
-    export_format: str='mseed'
-        If trim_dir is not None, this is the format in which to save the data
-    detrend : str or bool, default='spline'
-        If False, data is not detrended.
-        Otherwise, this should be a string accepted by the type parameter of the obspy.core.trace.Trace.detrend method: https://docs.obspy.org/packages/autogen/obspy.core.trace.Trace.detrend.html
-    detrend_order : int, default=2
-        If detrend parameter is 'spline' or 'polynomial', this is passed directly to the order parameter of obspy.core.trace.Trace.detrend method.
-    update_metadata : bool, default=True
-        Whether to update the metadata file, used primarily with Raspberry Shake data which uses a generic inventory file.
-    plot_input_stream : bool, default=False
-        Whether to plot the raw input stream. This plot includes a spectrogram (Z component) and the raw (with decimation for speed) plots of each component signal.
-    verbose : bool, default=False
-        Whether to print outputs and inputs to the terminal
-    **kwargs
-        Keywords arguments, primarily for 'batch' and 'dir' sources
-        
-    Returns
-    -------
-    params : HVSRData or HVSRBatch object
-        Same as params parameter, but with an additional "stream" attribute with an obspy data stream with 3 traces: Z (vertical), N (North-south), and E (East-west)
-    """
-    # Get input parameters
-    orig_args = locals().copy()
-    start_time = datetime.datetime.now()
-    
-    # Update with processing parameters specified previously in input_params, if applicable
-    if 'processing_parameters' in params.keys():
-        if 'fetch_data' in params['processing_parameters'].keys():
-            defaultVDict = dict(zip(inspect.getfullargspec(fetch_data).args[1:], 
-                        inspect.getfullargspec(fetch_data).defaults))
-            defaultVDict['kwargs'] = kwargs
-            for k, v in params['processing_parameters']['fetch_data'].items():
-                # Manual input to function overrides the imported parameter values
-                if k!='params' and k in orig_args.keys() and orig_args[k]==defaultVDict[k]:
-                    orig_args[k] = v
-
-    #Update local variables, in case of previously-specified parameters
-    source=orig_args['source']
-    trim_dir=orig_args['trim_dir']
-    export_format=orig_args['export_format']
-    detrend=orig_args['detrend']
-    detrend_order=orig_args['detrend_order']
-    update_metadata=orig_args['update_metadata']
-    plot_input_stream=orig_args['plot_input_stream']
-    verbose=orig_args['verbose']
-    kwargs=orig_args['kwargs']
-
-    if source != 'batch' and verbose:
-        print('\nFetching data (fetch_data())')
-        print()
-
-    params = get_metadata(params, update_metadata=update_metadata, source=source)
-    inv = params['inv']
-    date=params['acq_date']
-
-    #Cleanup for gui input
-    if isinstance(params['datapath'], (obspy.Stream, obspy.Trace)):
-        pass
-    elif '}' in str(params['datapath']):
-        params['datapath'] = params['datapath'].as_posix().replace('{','')
-        params['datapath'] = params['datapath'].split('}')
-    
-    sampleListNos = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10']
-    sampleList = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'batch', 'sample', 'sample_batch']
-    for s in sampleListNos:
-        sampleList.append(f'sample{s}')
-        sampleList.append(f'sample_{s}')
-
-    #Make sure datapath is pointing to an actual file
-    if isinstance(params['datapath'],list):
-        for i, d in enumerate(params['datapath']):
-            params['datapath'][i] = sprit_utils.checkifpath(str(d).strip(), sample_list=sampleList)
-        dPath = params['datapath']
-    elif isinstance(params['datapath'], (obspy.Stream, obspy.Trace)):
-        pass
-    else:
-        dPath = sprit_utils.checkifpath(params['datapath'], sample_list=sampleList)
-
-    inst = params['instrument']
-
-    #Need to put dates and times in right formats first
-    if type(date) is datetime.datetime:
-        doy = date.timetuple().tm_yday
-        year = date.year
-    elif type(date) is datetime.date:
-        date = datetime.datetime.combine(date, datetime.time(hour=0, minute=0, second=0))
-        doy = date.timetuple().tm_yday
-        year = date.year
-    elif type(date) is tuple:
-        if date[0]>366:
-            raise ValueError('First item in date tuple must be day of year (0-366)', 0)
-        elif date[1] > datetime.datetime.now().year:
-            raise ValueError('Second item in date tuple should be year, but given item is in the future', 0)
-        else:
-            doy = date[0]
-            year = date[1]
-    elif type(date) is str:
-        if '/' in date:
-            dateSplit = date.split('/')            
-        elif '-' in date:
-            dateSplit = date.split('-')
-        else:
-            dateSplit = date
-
-        if int(dateSplit[0]) > 31:
-            date = datetime.datetime(int(dateSplit[0]), int(dateSplit[1]), int(dateSplit[2]))
-            doy = date.timetuple().tm_yday
-            year = date.year
-        elif int(dateSplit[0])<=12 and int(dateSplit[2]) > 31:
-            warnings.warn("Preferred date format is 'yyyy-mm-dd' or 'yyyy/mm/dd'. Will attempt to parse date.")
-            date = datetime.datetime(int(dateSplit[2]), int(dateSplit[0]), int(dateSplit[1]))
-            doy = date.timetuple().tm_yday
-            year = date.year
-        else:
-            warnings.warn("Preferred date format is 'yyyy-mm-dd' or 'yyyy/mm/dd'. Cannot parse date.")
-    elif type(date) is int:
-        doy = date
-        year = datetime.datetime.today().year
-    else: #FOR NOW, need to update
-        date = datetime.datetime.now()
-        doy = date.timetuple().tm_yday
-        year = date.year
-        warnings.warn("Did not recognize date, using year {} and day {}".format(year, doy))
-
-    #Select which instrument we are reading from (requires different processes for each instrument)
-    raspShakeInstNameList = ['raspberry shake', 'shake', 'raspberry', 'rs', 'rs3d', 'rasp. shake', 'raspshake']
-    trominoNameList = ['tromino', 'trom', 'tromino 3g', 'tromino 3g+', 'tr', 't']
-
-    #Get any kwargs that are included in obspy.read
-    obspyReadKwargs = {}
-    for argName in inspect.getfullargspec(obspy.read)[0]:
-        if argName in kwargs.keys():
-            obspyReadKwargs[argName] = kwargs[argName]
-
-    #Select how reading will be done
-    if source=='raw':
-        try:
-            if inst.lower() in raspShakeInstNameList:
-                rawDataIN = __read_RS_file_struct(dPath, source, year, doy, inv, params, verbose=verbose)
-
-            elif inst.lower() in trominoNameList:
-                rawDataIN = read_tromino_files(dPath, params, verbose=verbose, **kwargs)
-        except:
-            raise RuntimeError(f"Data not fetched for {params['site']}. Check input parameters or the data file.")
-    elif source=='stream' or isinstance(params, (obspy.Stream, obspy.Trace)):
-        rawDataIN = params['datapath'].copy()
-    elif source=='dir':
-        if inst.lower() in raspShakeInstNameList:
-            rawDataIN = __read_RS_file_struct(dPath, source, year, doy, inv, params, verbose=verbose)
-        else:
-            obspyFiles = {}
-            for obForm in obspyFormats:
-                temp_file_glob = pathlib.Path(dPath.as_posix().lower()).glob('*.'+obForm.lower())
-                for f in temp_file_glob:
-                    currParams = params
-                    currParams['datapath'] = f
-
-                    curr_data = fetch_data(params, source='file', #all the same as input, except just reading the one file using the source='file'
-                                trim_dir=trim_dir, export_format=export_format, detrend=detrend, detrend_order=detrend_order, update_metadata=update_metadata, verbose=verbose, **kwargs)
-                    curr_data.merge()
-                    obspyFiles[f.stem] = curr_data  #Add path object to dict, with filepath's stem as the site name
-            return HVSRBatch(obspyFiles)
-    elif source=='file' and str(params['datapath']).lower() not in sampleList:
-        if isinstance(dPath, list) or isinstance(dPath, tuple):
-            rawStreams = []
-            for datafile in dPath:
-                rawStream = obspy.read(datafile, **obspyReadKwargs)
-                rawStreams.append(rawStream) #These are actually streams, not traces
-            for i, stream in enumerate(rawStreams):
-                if i == 0:
-                    rawDataIN = obspy.Stream(stream) #Just in case
-                else:
-                    rawDataIN = rawDataIN + stream #This adds a stream/trace to the current stream object
-        elif str(dPath)[:6].lower()=='sample':
-            pass
-        else:
-            rawDataIN = obspy.read(dPath, **obspyReadKwargs)#, starttime=obspy.core.UTCDateTime(params['starttime']), endttime=obspy.core.UTCDateTime(params['endtime']), nearest_sample =True)
-        import warnings
-        with warnings.catch_warnings():
-            warnings.simplefilter(action='ignore', category=UserWarning)
-            rawDataIN.attach_response(inv)
-    elif source=='batch' and str(params['datapath']).lower() not in sampleList:
-        if verbose:
-            print('\nFetching data (fetch_data())')
-        batch_data_read_kwargs = {k: v for k, v in locals()['kwargs'].items() if k in batch_data_read.__code__.co_varnames}
-        params = batch_data_read(input_data=params['datapath'], verbose=verbose, **batch_data_read_kwargs)
-        params = HVSRBatch(params)
-        return params
-    elif str(params['datapath']).lower() in sampleList or f"sample{str(params['datapath']).lower()}" in sampleList:
-        sample_data_dir = pathlib.Path(pkg_resources.resource_filename(__name__, 'resources/sample_data/'))
-        if source=='batch':
-            params['datapath'] = sample_data_dir.joinpath('Batch_SampleData.csv')
-            params = batch_data_read(input_data=params['datapath'], batch_type='sample', verbose=verbose)
-            params = HVSRBatch(params)
-            return params
-
-        elif source=='dir':
-            params['datapath'] = sample_data_dir.joinpath('Batch_SampleData.csv')
-            params = batch_data_read(input_data=params['datapath'], batch_type='sample', verbose=verbose)
-            params = HVSRBatch(params)
-            return params
-
-        elif source=='file':
-            params['datapath'] = str(params['datapath']).lower()
-            
-            if params['datapath'].lower() in sampleFileKeyMap.keys():
-                params['datapath'] = sampleFileKeyMap[params['datapath'].lower()]
-            else:
-                params['datapath'] = sample_data_dir.joinpath('SampleHVSRSite1_AM.RAC84.00.2023.046_2023-02-15_1704-1734.MSEED')
-
-            dPath = params['datapath']
-            rawDataIN = obspy.read(dPath)#, starttime=obspy.core.UTCDateTime(params['starttime']), endttime=obspy.core.UTCDateTime(params['endtime']), nearest_sample =True)
-            import warnings
-            with warnings.catch_warnings():
-                warnings.simplefilter(action='ignore', category=UserWarning)
-                rawDataIN.attach_response(inv)
-    else:
-        try:
-            rawDataIN = obspy.read(dPath)
-            rawDataIN.attach_response(inv)
-        except:
-            raise RuntimeError(f'source={source} not recognized, and datapath cannot be read using obspy.read()')
-
-    #Get metadata from the data itself, if not reading raw data
-    try:
-        dataIN = rawDataIN.copy()
-        if source!='raw':
-            #Use metadata from file for:
-            # site
-            if params['site'] == "HVSR Site":
-                if isinstance(dPath, (list, tuple)):
-                    dPath = dPath[0]
-                params['site'] = dPath.stem
-                params['params']['site'] = dPath.stem
-            
-            # network
-            if str(params['net']) == 'AM':
-                params['net'] = dataIN[0].stats.network
-                params['params']['net'] = dataIN[0].stats.network
-
-            # station
-            if str(params['sta']) == 'RAC84':
-                params['sta'] = dataIN[0].stats.station
-                params['params']['sta'] = dataIN[0].stats.station
-
-            # loc
-            if str(params['loc']) == '00':
-                params['loc'] = dataIN[0].stats.location
-                params['params']['loc'] = dataIN[0].stats.location
-            
-            # channels
-            channelList = []
-            if list(params['cha']) == ['EHZ', 'EHN', 'EHE']:
-                for tr in dataIN:
-                    if tr.stats.channel not in channelList:
-                        channelList.append(tr.stats.channel)
-                        channelList.sort(reverse=True) #Just so z is first, just in case
-                params['cha'] = channelList
-                params['params']['cha'] = channelList
-           
-            # Acquisition date
-            if str(params['acq_date']) == str(datetime.datetime.now().date()):
-                params['acq_date'] = dataIN[0].stats.starttime.date
-
-            # starttime
-            today_Starttime = obspy.UTCDateTime(datetime.datetime(year=datetime.date.today().year, month=datetime.date.today().month,
-                                                                 day = datetime.date.today().day,
-                                                                hour=0, minute=0, second=0, microsecond=0))
-            maxStarttime = datetime.datetime(year=params['acq_date'].year, month=params['acq_date'].month, day=params['acq_date'].day, 
-                                             hour=0, minute=0, second=0, microsecond=0, tzinfo=datetime.timezone.utc)
-            if str(params['starttime']) == str(today_Starttime):
-                for tr in dataIN.merge():
-                    currTime = datetime.datetime(year=tr.stats.starttime.year, month=tr.stats.starttime.month, day=tr.stats.starttime.day,
-                                        hour=tr.stats.starttime.hour, minute=tr.stats.starttime.minute, 
-                                       second=tr.stats.starttime.second, microsecond=tr.stats.starttime.microsecond, tzinfo=datetime.timezone.utc)
-                    if currTime > maxStarttime:
-                        maxStarttime = currTime
-
-                newStarttime = obspy.UTCDateTime(datetime.datetime(year=params['acq_date'].year, month=params['acq_date'].month,
-                                                                 day = params['acq_date'].day,
-                                                                hour=maxStarttime.hour, minute=maxStarttime.minute, 
-                                                                second=maxStarttime.second, microsecond=maxStarttime.microsecond))
-                params['starttime'] = newStarttime
-                params['params']['starttime'] = newStarttime
-
-            # endtime
-            today_Endtime = obspy.UTCDateTime(datetime.datetime(year=datetime.date.today().year, month=datetime.date.today().month,
-                                                                 day = datetime.date.today().day,
-                                                                hour=23, minute=59, second=59, microsecond=999999))
-            tomorrow_Endtime = today_Endtime + (60*60*24)
-            minEndtime = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc)#(hour=23, minute=59, second=59, microsecond=999999)
-            if str(params['endtime']) == str(today_Endtime) or str(params['endtime']) == str(tomorrow_Endtime):
-                for tr in dataIN.merge():
-                    currTime = datetime.datetime(year=tr.stats.endtime.year, month=tr.stats.endtime.month, day=tr.stats.endtime.day,
-                                        hour=tr.stats.endtime.hour, minute=tr.stats.endtime.minute, 
-                                       second=tr.stats.endtime.second, microsecond=tr.stats.endtime.microsecond, tzinfo=datetime.timezone.utc)
-                    if currTime < minEndtime:
-                        minEndtime = currTime
-                newEndtime = obspy.UTCDateTime(datetime.datetime(year=minEndtime.year, month=minEndtime.month,
-                                                                 day = minEndtime.day,
-                                                                hour=minEndtime.hour, minute=minEndtime.minute, 
-                                                                second=minEndtime.second, microsecond=minEndtime.microsecond, tzinfo=datetime.timezone.utc))
-                params['endtime'] = newEndtime
-                params['params']['endtime'] = newEndtime
-
-
-            #print(dataIN)
-            #print(params['starttime'])
-            #print(params['endtime'])
-            dataIN = dataIN.split()
-            dataIN = dataIN.trim(starttime=params['starttime'], endtime=params['endtime'])
-            dataIN.merge()
-            #print(dataIN)
-    except:
-        raise RuntimeError('Data not fetched. Check your input parameters or the data file.')
-        
-    #Trim and save data as specified
-    if trim_dir=='None':
-        trim_dir=None
-    if not trim_dir:
-        pass
-    else:
-        if isinstance(params, HVSRBatch):
-            pass
-        else:
-            dataIN = _trim_data(input=params, stream=dataIN, export_dir=trim_dir, source=source, export_format=export_format)
-
-    #Split data if masked array (if there are gaps)...detrending cannot be done without
-    for tr in dataIN:
-        if isinstance(tr.data, np.ma.masked_array):
-            dataIN = dataIN.split()
-            #Splits entire stream if any trace is masked_array
-            break
-
-    #Detrend data
-    if isinstance(params, HVSRBatch):
-        pass
-    else:
-        dataIN =  __detrend_data(input=dataIN, detrend=detrend, detrend_order=detrend_order, verbose=verbose, source=source)
-
-    #Remerge data
-    dataIN = dataIN.merge(method=1)
-
-    #Plot the input stream?
-    if plot_input_stream:
-        try:
-            params['InputPlot'] = _plot_specgram_stream(stream=dataIN, params=params, component='Z', stack_type='linear', detrend='mean', dbscale=True, fill_gaps=None, ylimstd=3, return_fig=True, fig=None, ax=None, show_plot=False)
-            #_get_removed_windows(input=dataIN, fig=params['InputPlot'][0], ax=params['InputPlot'][1], lineArtist =[], winArtist = [], existing_lineArtists=[], existing_xWindows=[], exist_win_format='matplotlib', keep_line_artists=True, time_type='matplotlib', show_plot=True)
-            plt.show()
-        except:
-            print('Error with default plotting method, falling back to internal obspy plotting method')
-            dataIN.plot(method='full', linewidth=0.25)
-
-    #Sort channels (make sure Z is first, makes things easier later)
-    if isinstance(params, HVSRBatch):
-        pass
-    else:
-        dataIN = _sort_channels(input=dataIN, source=source, verbose=verbose)
-
-    #Clean up the ends of the data unless explicitly specified to do otherwise (this is a kwarg, not a parameter)
-    if 'clean_ends' not in kwargs.keys():
-        clean_ends=True 
-    else:
-        clean_ends = kwargs['clean_ends']
-
-    if clean_ends:
-        maxStarttime = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc) - datetime.timedelta(days=36500) #100 years ago
-        minEndtime = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc) 
-
-        for tr in dataIN:
-            currStarttime = datetime.datetime(year=tr.stats.starttime.year, month=tr.stats.starttime.month, day=tr.stats.starttime.day, 
-                                         hour=tr.stats.starttime.hour, minute=tr.stats.starttime.minute, 
-                                         second=tr.stats.starttime.second, microsecond=tr.stats.starttime.microsecond, tzinfo=datetime.timezone.utc)
-            if currStarttime > maxStarttime:
-                maxStarttime = currStarttime
-
-            currEndtime = datetime.datetime(year=tr.stats.endtime.year, month=tr.stats.endtime.month, day=tr.stats.endtime.day, 
-                                         hour=tr.stats.endtime.hour, minute=tr.stats.endtime.minute, 
-                                         second=tr.stats.endtime.second, microsecond=tr.stats.endtime.microsecond, tzinfo=datetime.timezone.utc)
-
-            if currEndtime < minEndtime:
-                minEndtime = currEndtime
-
-
-        maxStarttime = obspy.UTCDateTime(maxStarttime)
-        minEndtime = obspy.UTCDateTime(minEndtime)
-        dataIN = dataIN.split()
-        for tr in dataIN:
-            tr.trim(starttime=maxStarttime, endtime=minEndtime)
-            pass
-        dataIN.merge()
-    
-    params['batch'] = False #Set False by default, will get corrected later in batch mode        
-    params['input_stream'] = dataIN.copy()
-    params['stream'] = dataIN.copy()
-    
-    if 'processing_parameters' not in params.keys():
-        params['processing_parameters'] = {}
-    params['processing_parameters']['fetch_data'] = {}
-    for key, value in orig_args.items():
-        params['processing_parameters']['fetch_data'][key] = value
-
-    
-    params['ProcessingStatus']['FetchDataStatus'] = True
-    if verbose and not isinstance(params, HVSRBatch):
-        dataINStr = dataIN.__str__().split('\n')
-        for line in dataINStr:
-            print('\t',line)
-    
-    params = _check_processing_status(params, start_time=start_time, func_name=inspect.stack()[0][3], verbose=verbose)
-
-    return params
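
The processing_parameters block at the top of fetch_data() above follows a reusable pattern: saved settings override only the arguments the caller left at their defaults, so explicit inputs always win. A hedged sketch of that pattern (the helper name is hypothetical):

    import inspect

    def merge_saved_params(func, call_args, saved_params):
        # Map each defaulted argument of func to its default value
        spec = inspect.getfullargspec(func)
        defaults = dict(zip(spec.args[-len(spec.defaults or ()):], spec.defaults or ()))
        # Saved settings fill in only arguments still at their defaults
        for k, v in saved_params.items():
            if k in call_args and k in defaults and call_args[k] == defaults[k]:
                call_args[k] = v
        return call_args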
-
+
include_location : bool, default=False, input CRS
+
Whether to include the location parameters in the exported settings document. This includes xcoord, ycoord, elevation, elev_unit, and input_crs
+
verbose : bool, default=True
+
Whether to print outputs and information to the terminal
+
+
+
+def fetch_data(params, source='file', data_export_path=None, data_export_format='mseed', detrend='spline', detrend_order=2, update_metadata=True, plot_input_stream=False, plot_engine='matplotlib', show_plot=True, verbose=False, **kwargs)
+
+
+

Fetch ambient seismic data from a source to read into obspy stream

+

Parameters

+
+
params : dict
+
Dictionary containing all the necessary params to get data.
+
Parameters defined using input_params() function.
+
source : str, {'raw', 'dir', 'file', 'batch'}
+
String indicating where/how data file was created. For example, if raw data, will need to find correct channels.
+
'raw' finds raspberry shake data, from raw output copied using scp directly from Raspberry Shake, either in folder or subfolders;
+
'dir' is used if the day's 3 component files (currently Raspberry Shake supported only) are all 3 contained in a directory by themselves.
+
'file' is used if the params['input_data'] specified in input_params() is the direct filepath to a single file to be read directly into an obspy stream.
+
'batch' is used to read a list or specified set of seismic files.
+
Most commonly, a csv file can be read in with all the parameters. Each row in the csv is a separate file. Columns can be arranged by parameter.
+
data_export_path : None or str or pathlib obj, default=None
+
If None (or False), data is not trimmed in this function. Otherwise, this is the directory to save trimmed and exported data.
+
data_export_format : str='mseed'
+
If data_export_path is not None, this is the format in which to save the data
+
detrend : str or bool, default='spline'
+
If False, data is not detrended. Otherwise, this should be a string accepted by the type parameter of the obspy.core.trace.Trace.detrend method: https://docs.obspy.org/packages/autogen/obspy.core.trace.Trace.detrend.html
+
detrend_order : int, default=2
+
If detrend parameter is 'spline' or 'polynomial', this is passed directly to the order parameter of obspy.core.trace.Trace.detrend method.
+
update_metadata : bool, default=True
+
Whether to update the metadata file, used primarily with Raspberry Shake data which uses a generic inventory file.
+
plot_input_stream : bool, default=False
+
Whether to plot the raw input stream. This plot includes a spectrogram (Z component) and the raw (with decimation for speed) plots of each component signal.
+
plot_engine : str, default='matplotlib'
+
Which plotting library/engine to use for plotting the Input stream. Options are 'matplotlib', 'plotly', or 'obspy' (not case sensitive).
+
verbose : bool, default=False
+
Whether to print outputs and inputs to the terminal
+
**kwargs
+
Keywords arguments, primarily for 'batch' and 'dir' sources
+
+

Returns

+
+
params : HVSRData or HVSRBatch object
+
Same as params parameter, but with an additional "stream" attribute with an obspy data stream with 3 traces: Z (vertical), N (North-south), and E (East-west)
+
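
A usage sketch with the renamed parameters (assumes sprit is installed, that input_params() accepts the renamed input_data argument, and that 'rec.mseed' is a 3-component recording):

    import sprit

    params = sprit.input_params(input_data='rec.mseed', site='Example Site')
    params = sprit.fetch_data(params, source='file',
                              data_export_path=None,   # do not save a trimmed copy
                              detrend='spline', detrend_order=2,
                              plot_input_stream=False, verbose=True)
    print(params['stream'])  # obspy Stream with Z, N, and E traces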
def format_time(inputDT, tzone='UTC')
@@ -4044,172 +322,6 @@

Returns

outputTimeObj : datetime object in UTC
Output datetime.datetime object, now in UTC time.
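
A behavior sketch (assumes format_time is importable from the sprit_hvsr module; values are illustrative):

    from sprit import sprit_hvsr

    # 17:04 local time in America/Chicago (UTC-6 in February) becomes 23:04 UTC
    utc_dt = sprit_hvsr.format_time('2023-02-15 17:04:00', tzone='America/Chicago')
    print(utc_dt)  # 2023-02-15 23:04:00+00:00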
-
def format_time(inputDT, tzone='UTC'):
-    """Private function to format time, used in other functions
-
-    Formats input time to datetime objects in utc
-
-    Parameters
-    ----------
-    inputDT : str or datetime obj 
-        Input datetime. Can include date and time, just date (time inferred to be 00:00:00.00) or just time (if so, date is set as today)
-    tzone   : str='utc' or int {'utc', 'local'} 
-        Timezone of data entry. 
-            If string and not utc, assumed to be timezone of computer running the process.
-            If int, assumed to be offset from UTC (e.g., CST in the United States is -6; CDT in the United States is -5)
-
-    Returns
-    -------
-    outputTimeObj : datetime object in UTC
-        Output datetime.datetime object, now in UTC time.
-
-    """
-    if type(inputDT) is str:
-        #tzone = 'America/Chicago'
-        #Format string to datetime obj
-        div = '-'
-        timeDiv = 'T'
-        if "/" in inputDT:
-            div = '/'
-            hasDate = True
-        elif '-' in inputDT:
-            div = '-'
-            hasDate = True
-        else:
-            hasDate= False
-            year = datetime.datetime.today().year
-            month = datetime.datetime.today().month
-            day = datetime.datetime.today().day
-
-        if ':' in inputDT:
-            hasTime = True
-            if 'T' in inputDT:
-                timeDiv = 'T'
-            else:
-                timeDiv = ' '
-        else:
-            hasTime = False
-        
-        if hasDate:
-            #If first number is 4-dig year (assumes yyyy-dd-mm is not possible)
-            if len(inputDT.split(div)[0])>2:
-                year = inputDT.split(div)[0]
-                month = inputDT.split(div)[1]
-                day = inputDT.split(div)[2].split(timeDiv)[0]
-
-            #If last number is 4-dig year            
-            elif len(inputDT.split(div)[2].split(timeDiv)[0])>2:
-                #..and first number is day
-                if int(inputDT.split(div)[0])>12:
-                    #dateStr = '%d'+div+'%m'+div+'%Y'   
-                    year = inputDT.split(div)[2].split(timeDiv)[0]
-                    month = inputDT.split(div)[1]
-                    day = inputDT.split(div)[0]
-                #...and first number is month (like American style)                             
-                else:
-                    year = inputDT.split(div)[2].split(timeDiv)[0]
-                    month = inputDT.split(div)[0]
-                    day = inputDT.split(div)[1]     
-            
-            #Another way to catch if first number is (2-digit) year
-            elif int(inputDT.split(div)[0])>31:
-                #dateStr = '%y'+div+'%m'+div+'%d'
-                year = inputDT.split(div)[0]
-                #Assumes anything less than current year is from this century
-                if int(year) < datetime.datetime.today().year % 100:
-                    year = '20'+year
-                else:#...and anything more than current year is from last century
-                    year = '19'+year
-                #assumes day will always come last in this instance, as above
-                month = inputDT.split(div)[1]
-                day = inputDT.split(div)[2].split(timeDiv)[0]
-
-            #If last digit is (2 digit) year           
-            elif int(inputDT.split(div)[2].split(timeDiv)[0])>31:
-                #...and first digit is day
-                if int(inputDT.split(div)[0])>12:
-                    #dateStr = '%d'+div+'%m'+div+'%y'       
-                    year = inputDT.split(div)[2].split(timeDiv)[0]
-                    if int(year) < datetime.datetime.today().year % 100:
-                        year = '20'+year
-                    else:
-                        year = '19'+year
-                    month = inputDT.split(div)[1]
-                    day = inputDT.split(div)[0]                           
-                else: #...and second digit is day
-                    #dateStr = '%m'+div+'%d'+div+'%y'
-                    year = inputDT.split(div)[2].split(timeDiv)[0]
-                    if int(year) < datetime.datetime.today().year % 100:
-                        year = '20'+year
-                    else:
-                        year = '19'+year
-                    month = inputDT.split(div)[0]
-                    day = inputDT.split(div)[1]                  
-
-        hour=0
-        minute=0
-        sec=0
-        microS=0
-        if hasTime:
-            if hasDate:
-                timeStr = inputDT.split(timeDiv)[1]
-            else:
-                timeStr = inputDT
-            
-            if 'T' in timeStr:
-                timeStr=timeStr.split('T')[1]
-            elif ' ' in timeStr:
-                timeStr=timeStr.split(' ')[1]
-
-            timeStrList = timeStr.split(':')
-            if len(timeStrList[0])>2:
-                timeStrList[0] = timeStrList[0][-2:]
-            elif int(timeStrList[0]) > 23:
-                timeStrList[0] = timeStrList[0][-1:]
-            
-            if len(timeStrList) == 3:
-                if '.' in timeStrList[2]:
-                    #Pad/trim the fractional-second digits to six places so they are true microseconds
-                    microS = int(timeStrList[2].split('.')[1].ljust(6, '0')[:6])
-                    timeStrList[2] = timeStrList[2].split('.')[0]
-            elif len(timeStrList) == 2:
-                timeStrList.append('00')
-
-            hour = int(timeStrList[0])
-            minute=int(timeStrList[1])
-            sec = int(timeStrList[2])
-
-        outputTimeObj = datetime.datetime(year=int(year),month=int(month), day=int(day),
-                                hour=int(hour), minute=int(minute), second=int(sec), microsecond=int(microS))
-
-    elif type(inputDT) is datetime.datetime or type(inputDT) is datetime.time:
-        outputTimeObj = inputDT
-
-    #Add timezone info
-    availableTimezones = list(map(str.lower, zoneinfo.available_timezones()))
-    if outputTimeObj.tzinfo is not None and outputTimeObj.tzinfo.utcoffset(outputTimeObj) is not None:
-        #This is already timezone aware
-        pass
-    elif type(tzone) is int:
-        #Attach a fixed UTC offset so the later astimezone() conversion does not assume local time
-        outputTimeObj = outputTimeObj.replace(tzinfo=datetime.timezone(datetime.timedelta(hours=tzone)))
-    elif type(tzone) is str:
-        if tzone.lower() in availableTimezones:
-            outputTimeObj = outputTimeObj.replace(tzinfo=zoneinfo.ZoneInfo(tzone))
-        else:
-            raise ValueError("Timezone {} is not in official list. \nAvailable timezones:\n{}".format(tzone, availableTimezones))
-    elif isinstance(tzone, zoneinfo.ZoneInfo):
-        outputTimeObj = outputTimeObj.replace(tzinfo=tzone)
-    else:
-        raise ValueError("Timezone must be either str or int")
-    
-    #Convert to UTC
-    outputTimeObj = outputTimeObj.astimezone(datetime.timezone.utc)   
-
-    return outputTimeObj
-
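The two-digit-year branches above pin a century window to the current year. A minimal, runnable sketch of that rule in isolation (the helper name below is illustrative, not part of SpRIT):

import datetime

def expand_two_digit_year(yy: str) -> str:
    # Years up to the current two-digit year are read as 20xx, later ones as 19xx
    # (the same assumption made by the parsing logic above)
    if int(yy) <= datetime.datetime.today().year % 100:
        return '20' + yy
    return '19' + yy

print(expand_two_digit_year('07'))  # 2007
print(expand_two_digit_year('98'))  # 1998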
def generate_ppsds(hvsr_data, azimuthal_ppsds=False, verbose=False, **ppsd_kwargs)
@@ -4229,7 +341,7 @@

Parameters

**ppsd_kwargs : dict
Dictionary with keyword arguments that are passed directly to obspy.signal.PPSD. If the following keywords are not specified, their defaults are amended in this function from the obspy defaults for its PPSD function. Specifically:
-- ppsd_length defaults to 60 (seconds) here instead of 3600
+- ppsd_length defaults to 30 (seconds) here instead of 3600
- skip_on_gaps defaults to True instead of False
- period_step_octaves defaults to 0.03125 instead of 0.125
@@ -4237,307 +349,12 @@

Returns

ppsds : HVSRData object
     Dictionary containing entries with ppsds for each channel
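A minimal usage sketch, assuming the standard SpRIT workflow (input_params() and fetch_data() are the documented upstream steps; the 'sample' input and the keyword override are illustrative):

import sprit

params = sprit.input_params(input_data='sample', site='Example Site')
hvsr_data = sprit.fetch_data(params)

# Override one of the amended defaults noted above; any other keywords
# fall through directly to obspy.signal.PPSD
hvsr_data = sprit.generate_ppsds(hvsr_data, ppsd_length=30, verbose=True)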
 
-
def generate_ppsds(hvsr_data, azimuthal_ppsds=False, verbose=False, **ppsd_kwargs):
-    """Generates PPSDs for each channel
-
-        Channels need to be in Z, N, E order
-        Info on PPSD creation here: https://docs.obspy.org/packages/autogen/obspy.signal.spectral_estimation.PPSD.html
-        
-        Parameters
-        ----------
-        hvsr_data : dict, HVSRData object, or HVSRBatch object
-            Data object containing all the parameters and other data of interest (stream and paz, for example)
-        azimuthal_ppsds : bool, default=False
-            Whether to generate PPSDs for azimuthal data
-        verbose : bool, default=True
-            Whether to print inputs and results to terminal
-        **ppsd_kwargs : dict
-            Dictionary with keyword arguments that are passed directly to obspy.signal.PPSD.
-            If the following keywords are not specified, their defaults are amended in this function from the obspy defaults for its PPSD function. Specifically:
-                - ppsd_length defaults to 60 (seconds) here instead of 3600
-                - skip_on_gaps defaults to True instead of False
-                - period_step_octaves defaults to 0.03125 instead of 0.125
-
-        Returns
-        -------
-            ppsds : HVSRData object
-                Dictionary containing entries with ppsds for each channel
-    """
-    #First, divide up for batch or not
-    orig_args = locals().copy() #Get the initial arguments
-    start_time = datetime.datetime.now()
-
-    ppsd_kwargs_sprit_defaults = ppsd_kwargs.copy()
-    #Set defaults here that are different than obspy defaults
-    if 'ppsd_length' not in ppsd_kwargs.keys():
-        ppsd_kwargs_sprit_defaults['ppsd_length'] = 30.0
-    if 'skip_on_gaps' not in ppsd_kwargs.keys():
-        ppsd_kwargs_sprit_defaults['skip_on_gaps'] = True
-    if 'period_step_octaves' not in ppsd_kwargs.keys():
-        ppsd_kwargs_sprit_defaults['period_step_octaves'] = 0.03125
-    if 'period_limits' not in ppsd_kwargs.keys():
-        ppsd_kwargs_sprit_defaults['period_limits'] =  [1/40, 1/1]
-
-    #Get Probablistic power spectral densities (PPSDs)
-    #Get default args for function
-    def get_default_args(func):
-        signature = inspect.signature(func)
-        return {
-            k: v.default
-            for k, v in signature.parameters.items()
-            if v.default is not inspect.Parameter.empty
-            }
-    
-    ppsd_kwargs = get_default_args(PPSD)
-    ppsd_kwargs.update(ppsd_kwargs_sprit_defaults)#Update with sprit defaults, or user input
-    orig_args['ppsd_kwargs'] = ppsd_kwargs
-
-    # Update with processing parameters specified previously in input_params, if applicable
-    if 'processing_parameters' in hvsr_data.keys():
-        if 'generate_ppsds' in hvsr_data['processing_parameters'].keys():
-            defaultVDict = dict(zip(inspect.getfullargspec(generate_ppsds).args[1:], 
-                                    inspect.getfullargspec(generate_ppsds).defaults))
-            defaultVDict['ppsd_kwargs'] = ppsd_kwargs
-            for k, v in hvsr_data['processing_parameters']['generate_ppsds'].items():
-                # Manual input to function overrides the imported parameter values
-                if not isinstance(v, (HVSRData, HVSRBatch)) and (k in orig_args.keys()) and (orig_args[k]==defaultVDict[k]):
-                    orig_args[k] = v
-
-    azimuthal_ppsds = orig_args['azimuthal_ppsds']
-    verbose = orig_args['verbose']
-    ppsd_kwargs = orig_args['ppsd_kwargs']
-
-    if (verbose and isinstance(hvsr_data, HVSRBatch)) or (verbose and not hvsr_data['batch']):
-        if isinstance(hvsr_data, HVSRData) and hvsr_data['batch']:
-            pass
-        else:
-            print('\nGenerating Probabilistic Power Spectral Densities (generate_ppsds())')
-            print('\tUsing the following parameters:')
-            for key, value in orig_args.items():
-                if key=='hvsr_data':
-                    pass
-                else:
-                    print('\t  {}={}'.format(key, value))
-            print()
-
-    #Site is in the keys anytime it's not batch
-    if isinstance(hvsr_data, HVSRBatch):
-        #If running batch, we'll loop through each one
-        for site_name in hvsr_data.keys():
-            args = orig_args.copy() #Make a copy so we don't accidentally overwrite
-            individual_params = hvsr_data[site_name] #Get what would normally be the "hvsr_data" variable for each site
-            args['hvsr_data'] = individual_params #reset the hvsr_data parameter we originally read in to an individual site hvsr_data
-            #args['hvsr_data']['batch'] = False #Set to false, since only running this time
-            if hvsr_data[site_name]['ProcessingStatus']['OverallStatus']:
-                try:
-                    hvsr_data[site_name] = _generate_ppsds_batch(**args) #Call another function, that lets us run this function again
-                except:
-                    hvsr_data[site_name]['ProcessingStatus']['PPSDStatus']=False
-                    hvsr_data[site_name]['ProcessingStatus']['OverallStatus'] = False                     
-            else:
-                hvsr_data[site_name]['ProcessingStatus']['PPSDStatus']=False
-                hvsr_data[site_name]['ProcessingStatus']['OverallStatus'] = False                
-            
-            try:
-                sprit_gui.update_progress_bars(prog_percent=5)
-            except Exception as e:
-                pass
-                #print(e)
-        return hvsr_data
-    else:
-        paz = hvsr_data['paz']
-        stream = hvsr_data['stream']
-
-        if azimuthal_ppsds:
-            #get azimuthal ppsds (in an HVSRBatch object?)
-            pass
-        else:
-            #Get ppsds of e component
-            eStream = stream.select(component='E')
-            estats = eStream.traces[0].stats
-            ppsdE = PPSD(estats, paz['E'],  **ppsd_kwargs)
-            ppsdE.add(eStream)
-
-            #Get ppsds of n component
-            nStream = stream.select(component='N')
-            nstats = nStream.traces[0].stats
-            ppsdN = PPSD(nstats, paz['N'], **ppsd_kwargs)
-            ppsdN.add(nStream)
-
-            #Get ppsds of z component
-            zStream = stream.select(component='Z')
-            zstats = zStream.traces[0].stats
-            ppsdZ = PPSD(zstats, paz['Z'], **ppsd_kwargs)
-            ppsdZ.add(zStream)
-
-            ppsds = {'Z':ppsdZ, 'N':ppsdN, 'E':ppsdE}
-
-            #Add to the input dictionary, so that some items can be manipulated later on, and original can be saved
-            hvsr_data['ppsds_obspy'] = ppsds
-            hvsr_data['ppsds'] = {}
-            anyKey = list(hvsr_data['ppsds_obspy'].keys())[0]
-            
-            #Get ppsd class members
-            members = [mems for mems in dir(hvsr_data['ppsds_obspy'][anyKey]) if not callable(mems) and not mems.startswith("_")]
-            hvsr_data['ppsds']['Z'] = {}
-            hvsr_data['ppsds']['E'] = {}
-            hvsr_data['ppsds']['N'] = {}
-            
-            #Get lists/arrays so we can manipulate data later and copy everything over to main 'ppsds' subdictionary (convert lists to np.arrays for consistency)
-            listList = ['times_data', 'times_gaps', 'times_processed','current_times_used', 'psd_values'] #Things that need to be converted to np.array first, for consistency
-            timeKeys= ['times_processed','current_times_used','psd_values']
-            timeDiffWarn = True
-            dfList = []
-            time_data = {}
-            time_dict = {}
-            for m in members:
-                hvsr_data['ppsds']['Z'][m] = getattr(hvsr_data['ppsds_obspy']['Z'], m)
-                hvsr_data['ppsds']['E'][m] = getattr(hvsr_data['ppsds_obspy']['E'], m)
-                hvsr_data['ppsds']['N'][m] = getattr(hvsr_data['ppsds_obspy']['N'], m)
-                if m in listList:
-                
-                    hvsr_data['ppsds']['Z'][m] = np.array(hvsr_data['ppsds']['Z'][m])
-                    hvsr_data['ppsds']['E'][m] = np.array(hvsr_data['ppsds']['E'][m])
-                    hvsr_data['ppsds']['N'][m] = np.array(hvsr_data['ppsds']['N'][m])
-                
-                if str(m)=='times_processed':
-                    unique_times = np.unique(np.array([hvsr_data['ppsds']['Z'][m],
-                                            hvsr_data['ppsds']['E'][m],
-                                            hvsr_data['ppsds']['N'][m]]))
-                    
-                    common_times = []
-                    for currTime in unique_times:
-                        if currTime in hvsr_data['ppsds']['Z'][m]:
-                            if currTime in hvsr_data['ppsds']['E'][m]:
-                                if currTime in hvsr_data['ppsds']['N'][m]:
-                                    common_times.append(currTime)
-
-                    cTimeIndList = []
-                    for cTime in common_times:
-                        ZArr = hvsr_data['ppsds']['Z'][m]
-                        EArr = hvsr_data['ppsds']['E'][m]
-                        NArr = hvsr_data['ppsds']['N'][m]
-
-                        cTimeIndList.append([int(np.where(ZArr == cTime)[0][0]),
-                                            int(np.where(EArr == cTime)[0][0]),
-                                            int(np.where(NArr == cTime)[0][0])])
-                        
-                #Make sure number of time windows is the same between PPSDs (this can happen with just a few slightly different number of samples)
-                if m in timeKeys:
-                    if str(m) != 'times_processed':
-                        time_data[str(m)] = (hvsr_data['ppsds']['Z'][m], hvsr_data['ppsds']['E'][m], hvsr_data['ppsds']['N'][m])
-
-                    #print(m, hvsr_data['ppsds']['Z'][m])
-
-                    tSteps_same = hvsr_data['ppsds']['Z'][m].shape[0] == hvsr_data['ppsds']['E'][m].shape[0] == hvsr_data['ppsds']['N'][m].shape[0]
-
-                    if not tSteps_same:
-                        shortestTimeLength = min(hvsr_data['ppsds']['Z'][m].shape[0], hvsr_data['ppsds']['E'][m].shape[0], hvsr_data['ppsds']['N'][m].shape[0])
-
-                        maxPctDiff = 0
-                        for comp in hvsr_data['ppsds'].keys():
-                            currCompTimeLength = hvsr_data['ppsds'][comp][m].shape[0]
-                            timeLengthDiff = currCompTimeLength - shortestTimeLength
-                            percentageDiff = timeLengthDiff / currCompTimeLength
-                            if percentageDiff > maxPctDiff:
-                                maxPctDiff = percentageDiff
-
-                        for comp in hvsr_data['ppsds'].keys():
-                            while hvsr_data['ppsds'][comp][m].shape[0] > shortestTimeLength:
-                                hvsr_data['ppsds'][comp][m] = hvsr_data['ppsds'][comp][m][:-1]
-                        
-                        
-                        if maxPctDiff > 0.05 and timeDiffWarn:
-                            warnings.warn(f"\t  Number of ppsd time windows between different components is significantly different: {round(maxPctDiff*100,2)}% > 5%. Last windows will be trimmed.")
-                        elif verbose  and timeDiffWarn:
-                            print(f"\t  Number of ppsd time windows between different components is different by {round(maxPctDiff*100,2)}%. Last window(s) of components with larger number of ppsd windows will be trimmed.")
-                        timeDiffWarn = False #So we only do this warning once, even though there are multiple arrays that need to be trimmed
-
-            for i, currTStep in enumerate(cTimeIndList):
-                colList = []
-                currTStepList = []
-                colList.append('TimesProcessed_Obspy')
-                currTStepList.append(common_times[i])
-                for tk in time_data.keys():
-                    colList.append(str(tk)+'_Z')
-                    colList.append(str(tk)+'_E')
-                    colList.append(str(tk)+'_N')
-                    currTStepList.append(time_data[tk][0][currTStep[0]])#z
-                    currTStepList.append(time_data[tk][1][currTStep[1]])#e
-                    currTStepList.append(time_data[tk][2][currTStep[2]])#n
-
-                dfList.append(currTStepList)
-                
-            hvsrDF = pd.DataFrame(dfList, columns=colList)
-            hvsrDF['TimesProcessed_ObspyEnd'] = hvsrDF['TimesProcessed_Obspy'] + ppsd_kwargs['ppsd_length']
-            
-            #Add other times (for start times)
-            def convert_to_datetime(obspyUTCDateTime):
-                return obspyUTCDateTime.datetime.replace(tzinfo=datetime.timezone.utc)
-
-            def convert_to_mpl_dates(obspyUTCDateTime):
-                return obspyUTCDateTime.matplotlib_date
-
-            hvsrDF['TimesProcessed'] = hvsrDF['TimesProcessed_Obspy'].apply(convert_to_datetime)     
-            hvsrDF['TimesProcessed_End'] = hvsrDF['TimesProcessed'] + datetime.timedelta(days=0,seconds=ppsd_kwargs['ppsd_length'])
-            hvsrDF['TimesProcessed_MPL'] = hvsrDF['TimesProcessed_Obspy'].apply(convert_to_mpl_dates)
-            hvsrDF['TimesProcessed_MPLEnd'] = hvsrDF['TimesProcessed_MPL'] + (ppsd_kwargs['ppsd_length']/86400)
-            
-            hvsrDF['Use'] = True
-            hvsrDF['Use']=hvsrDF['Use'].astype(bool)
-            for gap in hvsr_data['ppsds']['Z']['times_gaps']:
-                #Flag windows overlapping this gap; AND with previous flags so every gap is respected
-                hvsrDF['Use'] = hvsrDF['Use'] & ((hvsrDF['TimesProcessed_MPL'].gt(gap[1].matplotlib_date)) | \
-                                                 (hvsrDF['TimesProcessed_MPLEnd'].lt(gap[0].matplotlib_date)))
-            
-            hvsrDF['Use'] = hvsrDF['Use'].astype(bool)
-            if 'xwindows_out' in hvsr_data.keys():
-                for window in hvsr_data['xwindows_out']:
-                    #Keep only windows entirely before or entirely after each manually-excluded window,
-                    #ANDing with previous flags so earlier exclusions are preserved
-                    hvsrDF['Use'] = hvsrDF['Use'] & \
-                            ((hvsrDF['TimesProcessed_MPL'].lt(window[0]) & hvsrDF['TimesProcessed_MPLEnd'].lt(window[0])) | \
-                             (hvsrDF['TimesProcessed_MPL'].gt(window[1]) & hvsrDF['TimesProcessed_MPLEnd'].gt(window[1])))
-                hvsrDF['Use'] = hvsrDF['Use'].astype(bool)
-                
-            hvsrDF.set_index('TimesProcessed', inplace=True)
-            hvsr_data['hvsr_df'] = hvsrDF
-            #Create dict entry to keep track of how many outlier hvsr curves are removed (2-item list with [0]=current number, [1]=original number of curves)
-            hvsr_data['tsteps_used'] = [hvsrDF['Use'].sum(), hvsrDF['Use'].shape[0]]
-            #hvsr_data['tsteps_used'] = [hvsr_data['ppsds']['Z']['times_processed'].shape[0], hvsr_data['ppsds']['Z']['times_processed'].shape[0]]
-            
-            hvsr_data['tsteps_used'][0] = hvsr_data['ppsds']['Z']['current_times_used'].shape[0]
-            
-            hvsr_data = sprit_utils.make_it_classy(hvsr_data)
-        
-            if 'processing_parameters' not in hvsr_data.keys():
-                hvsr_data['processing_parameters'] = {}
-            hvsr_data['processing_parameters']['generate_ppsds'] = {}
-            for key, value in orig_args.items():
-                hvsr_data['processing_parameters']['generate_ppsds'][key] = value
-
-    hvsr_data['ProcessingStatus']['PPSDStatus'] = True
-    hvsr_data = _check_processing_status(hvsr_data, start_time=start_time, func_name=inspect.stack()[0][3], verbose=verbose)
-    return hvsr_data
-
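The per-window bookkeeping assembled above is stored on the returned object; a hedged peek at it, continuing the sketch from the Parameters section (column names as built in the source):

# hvsr_data as returned by sprit.generate_ppsds() above
df = hvsr_data['hvsr_df']                        # indexed by TimesProcessed (UTC datetimes)
print(df[['TimesProcessed_MPL', 'Use']].head())  # 'Use' flags windows kept for HVSR
used, total = hvsr_data['tsteps_used']           # [windows currently used, original count]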
def get_char(in_char)

Outputs character with proper encoding/decoding

-
def get_char(in_char):
-    """Outputs character with proper encoding/decoding"""
-    if in_char in greek_chars.keys():
-        out_char = greek_chars[in_char].encode(encoding='utf-8')
-    else:
-        out_char = in_char.encode(encoding='utf-8')
-    return out_char.decode('utf-8')
-
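A hedged usage sketch (greek_chars is the module-level lookup used above; that 'sigma' is one of its keys is an assumption):

# Assumes get_char (and greek_chars) from the module above are in scope
print(get_char('sigma'))  # e.g., 'σ', if 'sigma' is a key of greek_chars
print(get_char('A'))      # non-Greek input round-trips unchanged: 'A'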
def get_metadata(params, write_path='', update_metadata=True, source=None, **read_inventory_kwargs)
@@ -4562,511 +379,61 @@

Returns

params : dict
Modified input dictionary with additional key:value pair containing paz dictionary (key = "paz")
-
def get_metadata(params, write_path='', update_metadata=True, source=None, **read_inventory_kwargs):
-    """Get metadata and calculate or get paz parameter needed for PPSD
-
-    Parameters
-    ----------
-    params : dict
-        Dictionary containing all the input and other parameters needed for processing
-            Ouput from input_params() function
-    write_path : str
-        String with output filepath of where to write updated inventory or metadata file
-            If not specified, does not write file 
-    update_metadata : bool
-        Whether to update the metadata file itself, or just read as-is. If using provided raspberry shake metadata file, select True.
-    source : str, default=None
-        This passes the source variable value to _read_RS_metadata. It is expected that this is passed directly from the source parameter of sprit.fetch_data()
-
-    Returns
-    -------
-    params : dict
-        Modified input dictionary with additional key:value pair containing paz dictionary (key = "paz")
-    """
-    invPath = params['metapath']
-    raspShakeInstNameList = ['raspberry shake', 'shake', 'raspberry', 'rs', 'rs3d', 'rasp. shake', 'raspshake']
-    trominoNameList = ['tromino', 'trom', 'trm', 't']
-    if params['instrument'].lower() in raspShakeInstNameList:
-        if update_metadata:
-            params = _update_shake_metadata(filepath=invPath, params=params, write_path=write_path)
-        params = _read_RS_Metadata(params, source=source)
-    elif params['instrument'].lower() in trominoNameList:
-        params['paz'] = {'Z':{}, 'E':{}, 'N':{}}
-        #ALL THESE VALUES ARE PLACEHOLDERS, taken from RASPBERRY SHAKE! (Needed for PPSDs)
-        params['paz']['Z'] = {'sensitivity': 360000000.0,
-                              'gain': 360000000.0,
-                              'poles': [(-1+0j), (-3.03+0j), (-3.03+0j), (-666.67+0j)],  
-                              'zeros': [0j, 0j, 0j]}
-        params['paz']['E'] =  params['paz']['Z']
-        params['paz']['N'] =  params['paz']['Z']
-
-        channelObj_Z = obspy.core.inventory.channel.Channel(code='BHZ', location_code='00', latitude=params['params']['latitude'], 
-                                                longitude=params['params']['longitude'], elevation=params['params']['elevation'], depth=params['params']['depth'], 
-                                                azimuth=0, dip=90, types=None, external_references=None, 
-                                                sample_rate=None, sample_rate_ratio_number_samples=None, sample_rate_ratio_number_seconds=None,
-                                                storage_format=None, clock_drift_in_seconds_per_sample=None, calibration_units=None, 
-                                                calibration_units_description=None, sensor=None, pre_amplifier=None, data_logger=None,
-                                                equipments=None, response=None, description=None, comments=None, start_date=None, end_date=None, 
-                                                restricted_status=None, alternate_code=None, historical_code=None, data_availability=None, 
-                                                identifiers=None, water_level=None, source_id=None)
-        channelObj_E = obspy.core.inventory.channel.Channel(code='BHE', location_code='00', latitude=params['params']['latitude'], 
-                                                longitude=params['params']['longitude'], elevation=params['params']['elevation'], depth=params['params']['depth'], 
-                                                azimuth=90, dip=0) 
-        
-        channelObj_N = obspy.core.inventory.channel.Channel(code='BHN', location_code='00', latitude=params['params']['latitude'], 
-                                                longitude=params['params']['longitude'], elevation=params['params']['elevation'], depth=params['params']['depth'], 
-                                                azimuth=0, dip=0) 
-        
-        siteObj = obspy.core.inventory.util.Site(name=params['params']['site'], description=None, town=None, county=None, region=None, country=None)
-        stationObj = obspy.core.inventory.station.Station(code='TZ', latitude=params['params']['latitude'], longitude=params['params']['longitude'], 
-                                            elevation=params['params']['elevation'], channels=[channelObj_Z, channelObj_E, channelObj_N], site=siteObj, 
-                                            vault=None, geology=None, equipments=None, operators=None, creation_date=datetime.datetime.today(),
-                                            termination_date=None, total_number_of_channels=None, 
-                                            selected_number_of_channels=None, description='Estimated data for Tromino, this is NOT from the manufacturer',
-                                            comments=None, start_date=None, 
-                                            end_date=None, restricted_status=None, alternate_code=None, historical_code=None, 
-                                            data_availability=None, identifiers=None, water_level=None, source_id=None)
-
-        network = [obspy.core.inventory.network.Network(code='TROM', stations=[stationObj], total_number_of_stations=None, 
-                                            selected_number_of_stations=None, description=None, comments=None, start_date=None, 
-                                            end_date=None, restricted_status=None, alternate_code=None, historical_code=None, 
-                                            data_availability=None, identifiers=None, operators=None, source_id=None)]
-        
-        params['inv'] = obspy.Inventory(networks=network)
-    else:
-        if not invPath:
-            pass  #if invPath is None, there is no inventory file to read
-        else:
-            if not pathlib.Path(invPath).exists() or invPath == '':
-                warnings.warn(f"The metapath parameter was not specified correctly. Returning original params value {params['metapath']}")
-            #Only pass through keyword arguments that obspy.read_inventory() actually accepts
-            readInvKwargs = {}
-            argspecs = inspect.getfullargspec(obspy.read_inventory)
-            for argName in argspecs[0]:
-                if argName in read_inventory_kwargs.keys():
-                    readInvKwargs[argName] = read_inventory_kwargs[argName]
-
-            readInvKwargs['path_or_file_object'] = invPath
-            params['inv'] = obspy.read_inventory(**readInvKwargs)
-            if 'params' in params.keys():
-                params['params']['inv'] = params['inv']
-
-    return params
-
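A hedged usage sketch (assumes params comes from input_params(), where instrument and metapath are set; 'sample' remains illustrative):

import sprit

params = sprit.input_params(input_data='sample', instrument='Raspberry Shake')
params = sprit.get_metadata(params, update_metadata=True)
paz = params['paz']  # poles-and-zeros dictionaries keyed by component: 'Z', 'E', 'N'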
-def get_report(hvsr_results, report_format='print', plot_type='HVSR p ann C+ p ann Spec', export_path=None, csv_overwrite_opt='append', no_output=False, verbose=False)
+def get_report(hvsr_results, report_formats=['print', 'table', 'plot', 'html', 'pdf'], azimuth='HV', plot_type='HVSR p ann C+ p ann Spec p ann', plot_engine='matplotlib', show_print_report=True, show_table_report=False, show_plot_report=True, show_html_report=False, show_pdf_report=True, suppress_report_outputs=False, show_report_outputs=False, csv_handling='append', report_export_format=None, report_export_path=None, verbose=False, **kwargs)
-

Get a report of the HVSR analysis in a variety of formats.

+Generate and/or print and/or export a report of the HVSR analysis in a variety of formats.
+
+Formats include:
+* 'print': A (monospace) text summary of the HVSR results
+* 'table': A pandas.DataFrame summary of the HVSR Results. This is useful for copy/pasting directly into a larger worksheet.
+* 'plot': A plot summary of the HVSR results, generated using the plot_hvsr() function.
+* 'html': An HTML document/text of the HVSR results. This includes the table, print, and plot reports in one document.
+* 'pdf': A PDF document showing the summary of the HVSR Results. The PDF report is simply the HTML report saved to an A4-sized PDF document.

Parameters

hvsr_results : dict
Dictionary containing all the information about the processed hvsr data
-report_format : {'csv', 'print', plot}
+report_formats : {'table', 'print', plot}
Format in which to print or export the report. The following report_formats return the following items in the following attributes:
-- 'plot': hvsr_results['Print_Report'] as a str str
+- 'plot': hvsr_results['Print_Report'] as a str
- 'print': hvsr_results['HV_Plot'] - matplotlib.Figure object
-- 'csv': hvsr_results['CSV_Report'] - pandas.DataFrame object
-- list/tuple - a list or tuple of the above objects, in the same order they are in the report_format list
+- 'table': hvsr_results['Table_Report'] - pandas.DataFrame object
+- list/tuple - a list or tuple of the above objects, in the same order they are in the report_formats list
+- 'html': hvsr_results['HTML_Report'] - a string containing the text for an HTML document
+- 'pdf': currently does not save to the HVSRData object itself, can only be saved to the disk directly
plot_type : str, default = 'HVSR p ann C+ p ann Spec
-What type of plot to plot, if 'plot' part of report_format input
-export_path : None, bool, or filepath, default = None
-If None or False, does not export; if True, will export to same directory as the datapath parameter in the input_params() function.
+What type of plot to plot, if 'plot' part of report_formats input
+azimuth : str, default = 'HV'
+Which azimuth to plot, by default "HV" which is the main "azimuth" combining the E and N components
+csv_handling : str, {'append', 'overwrite', 'keep/rename'}
+How to handle table report outputs if the designated csv output file already exists. By default, appends the new information to the end of the existing file.
+suppress_report_outputs : bool, default=False
+If True, only reads output to appropriate attribute of data class (ie, print does not print, only reads text into variable). If False, performs as normal.
+report_export_format : list or str, default=['pdf']
+A string or list of strings indicating which report formats should be exported to disk.
+report_export_path : None, bool, or filepath, default = None
+If None or False, does not export; if True, will export to same directory as the input_data parameter in the input_params() function.
Otherwise, it should be a string or path object indicating where to export results. May be a file or directory. If a directory is specified, the filename will be
-"<site_name>_<acq_date>_<UTC start time>-<UTC end time>". The suffix defaults to png for report_format="plot", csv for 'csv', and does not export if 'print.'
-csv_overwrite_opts : str, {'append', 'overwrite', 'keep/rename'}
-How to handle csv report outputs if the designated csv output file already exists. By default, appends the new information to the end of the existing file.
-no_output : bool, default=False
-If True, only reads output to appropriate attribute of data class (ie, print does not print, only reads text into variable). If False, performs as normal.
+"<site_name>_<acq_date>_<UTC start time>-<UTC end time>".
+The extension/suffix defaults to png for report_formats="plot", csv for 'table', txt for 'print', html for 'html', and pdf for 'pdf.'
verbose : bool, default=True
-Whether to print the results to terminal. This is the same output as report_format='print', and will not repeat if that is already selected
+Whether to print the results to terminal. This is the same output as report_formats='print', and will not repeat if that is already selected

Returns

HVSRData
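A hedged usage sketch of the updated signature (parameter names as in the signature above; hvsr_results stands in for the object produced by the main processing workflow):

import sprit

hvsr_results = sprit.get_report(hvsr_results,
                                report_formats=['print', 'table'],
                                csv_handling='append',
                                report_export_path=None,  # set a path (or True) to export to disk
                                verbose=True)
table_df = hvsr_results['Table_Report']  # pandas.DataFrame, per the formats list above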
 
-
def get_report(hvsr_results, report_format='print', plot_type='HVSR p ann C+ p ann Spec', export_path=None, csv_overwrite_opt='append', no_output=False, verbose=False):    
-    """Get a report of the HVSR analysis in a variety of formats.
-        
-    Parameters
-    ----------
-    hvsr_results : dict
-        Dictionary containing all the information about the processed hvsr data
-    report_format : {'csv', 'print', plot}
-        Format in which to print or export the report.
-        The following report_formats return the following items in the following attributes:
-            - 'plot': hvsr_results['Print_Report'] as a str str
-            - 'print': hvsr_results['HV_Plot'] - matplotlib.Figure object
-            - 'csv':  hvsr_results['CSV_Report']- pandas.DataFrame object
-                - list/tuple - a list or tuple of the above objects, in the same order they are in the report_format list
-    plot_type : str, default = 'HVSR p ann C+ p ann Spec
-        What type of plot to plot, if 'plot' part of report_format input
-    export_path : None, bool, or filepath, default = None
-        If None or False, does not export; if True, will export to same directory as the datapath parameter in the input_params() function.
-        Otherwise, it should be a string or path object indicating where to export results. May be a file or directory.
-        If a directory is specified, the filename will be  "<site_name>_<acq_date>_<UTC start time>-<UTC end time>". The suffix defaults to png for report_format="plot", csv for 'csv', and does not export if 'print.'
-    csv_overwrite_opts : str, {'append', 'overwrite', 'keep/rename'}
-        How to handle csv report outputs if the designated csv output file already exists. By default, appends the new information to the end of the existing file.
-    no_output : bool, default=False
-        If True, only reads output to appropriate attribute of data class (ie, print does not print, only reads text into variable). If False, performs as normal.
-    verbose : bool, default=True
-        Whether to print the results to terminal. This is the same output as report_format='print', and will not repeat if that is already selected
-
-    Returns
-    -------
-    sprit.HVSRData
-    """
-    orig_args = locals().copy() #Get the initial arguments
-
-    # Update with processing parameters specified previously in input_params, if applicable
-    if 'processing_parameters' in hvsr_results.keys():
-        if 'get_report' in hvsr_results['processing_parameters'].keys():
-            for k, v in hvsr_results['processing_parameters']['get_report'].items():
-                defaultVDict = dict(zip(inspect.getfullargspec(get_report).args[1:], 
-                                        inspect.getfullargspec(get_report).defaults))
-                # Manual input to function overrides the imported parameter values
-                if (not isinstance(v, (HVSRData, HVSRBatch))) and (k in orig_args.keys()) and (orig_args[k]==defaultVDict[k]):
-                    orig_args[k] = v
-
-    report_format = orig_args['report_format']
-    plot_type = orig_args['plot_type']
-    export_path = orig_args['export_path']
-    csv_overwrite_opt = orig_args['csv_overwrite_opt']
-    no_output = orig_args['no_output']
-    verbose = orig_args['verbose']
-    
-    if (verbose and isinstance(hvsr_results, HVSRBatch)) or (verbose and not hvsr_results['batch']):
-        if isinstance(hvsr_results, HVSRData) and hvsr_results['batch']:
-            pass
-        else:
-            print('\nGetting HVSR Report: get_report()')
-            print('\tUsing the following parameters:')
-            for key, value in orig_args.items():
-                if key=='params':
-                    pass
-                else:
-                    print('\t  {}={}'.format(key, value))
-            print()
-
-    if isinstance(hvsr_results, HVSRBatch):
-        if verbose:
-            print('\nGetting Reports: Running in batch mode')
-
-            print('\tUsing parameters:')
-            for key, value in orig_args.items():
-                print(f'\t  {key}={value}')    
-            print()
-        #If running batch, we'll loop through each site
-        for site_name in hvsr_results.keys():
-            args = orig_args.copy() #Make a copy so we don't accidentally overwrite
-            individual_params = hvsr_results[site_name] #Get what would normally be the "params" variable for each site
-            args['hvsr_results'] = individual_params #reset the params parameter we originally read in to an individual site params
-            if hvsr_results[site_name]['ProcessingStatus']['OverallStatus']:
-                try:
-                    hvsr_results[site_name] = _get_report_batch(**args) #Call another function, that lets us run this function again
-                except:
-                    hvsr_results[site_name] = hvsr_results[site_name]
-            else:
-                hvsr_results[site_name] = hvsr_results[site_name]
-        
-        combined_csvReport = pd.DataFrame()
-        for site_name in hvsr_results.keys():
-            if 'CSV_Report' in hvsr_results[site_name].keys():
-                combined_csvReport = pd.concat([combined_csvReport, hvsr_results[site_name]['CSV_Report']], ignore_index=True, join='inner')
-        
-        if export_path is not None:
-            if export_path is True:
-                if pathlib.Path(hvsr_results['input_params']['datapath']) in sampleFileKeyMap.values():
-                    csvExportPath = pathlib.Path(os.getcwd())
-                else:
-                    csvExportPath = pathlib.Path(hvsr_results['input_params']['datapath'])
-            elif pathlib.Path(export_path).is_dir():
-                csvExportPath = export_path
-            elif pathlib.Path(export_path).is_file():
-                csvExportPath = export_path.parent
-            else:
-                csvExportPath = pathlib.Path(hvsr_results[site_name].datapath)
-                if csvExportPath.is_dir():
-                    pass
-                else:
-                    csvExportPath = csvExportPath.parent
-                
-            combined_csvReport.to_csv(csvExportPath, index=False)
-        
-    else:       
-        #if 'BestPeak' in hvsr_results.keys() and 'PassList' in hvsr_results['BestPeak'].keys():
-        try:
-            curvTestsPassed = (hvsr_results['BestPeak']['PassList']['WindowLengthFreq.'] +
-                                hvsr_results['BestPeak']['PassList']['SignificantCycles']+
-                                hvsr_results['BestPeak']['PassList']['LowCurveStDevOverTime'])
-            curvePass = curvTestsPassed > 2
-            
-            #Peak Pass?
-            peakTestsPassed = ( hvsr_results['BestPeak']['PassList']['PeakProminenceBelow'] +
-                        hvsr_results['BestPeak']['PassList']['PeakProminenceAbove']+
-                        hvsr_results['BestPeak']['PassList']['PeakAmpClarity']+
-                        hvsr_results['BestPeak']['PassList']['FreqStability']+
-                        hvsr_results['BestPeak']['PassList']['PeakStability_FreqStD']+
-                        hvsr_results['BestPeak']['PassList']['PeakStability_AmpStD'])
-            peakPass = peakTestsPassed >= 5
-        except Exception as e:
-            errMsg= 'No BestPeak identified. Check peak_freq_range or hvsr_band or try to remove bad noise windows using remove_noise() or change processing parameters in process_hvsr() or generate_ppsds(). Otherwise, data may not be usable for HVSR.'
-            print(errMsg)
-            print(e)
-            return hvsr_results
-            #raise RuntimeError('No BestPeak identified. Check peak_freq_range or hvsr_band or try to remove bad noise windows using remove_noise() or change processing parameters in process_hvsr() or generate_ppsds(). Otherwise, data may not be usable for HVSR.')
-    
-        if isinstance(report_format, (list, tuple)):
-            pass
-        else:
-            #We will use a loop later even if it's just one report type, so reformat to prepare for for loop
-            allList = [':', 'all']
-            if report_format.lower() in allList:
-                report_format = ['print', 'csv', 'plot']
-            else:
-                report_format = [report_format]   
-
-        def export_report(export_obj, _export_path, _rep_form):
-            if _export_path is None:
-                return
-            else:
-                if _rep_form == 'csv':
-                    ext = '.csv'
-                elif _rep_form =='plot':
-                    ext='.png'
-                else:
-                    ext=''
-                    
-                sitename=hvsr_results['input_params']['site']#.replace('.', '-')
-                fname = f"{sitename}_{hvsr_results['input_params']['acq_date']}_{str(hvsr_results['input_params']['starttime'].time)[:5]}-{str(hvsr_results['input_params']['endtime'].time)[:5]}{ext}"
-                fname = fname.replace(':', '')
-
-                if _export_path==True:
-                    #Check so we don't write in sample directory
-                    if pathlib.Path(hvsr_results['input_params']['datapath']) in sampleFileKeyMap.values():
-                        if pathlib.Path(os.getcwd()) in sampleFileKeyMap.values(): #Just in case current working directory is also sample directory
-                            inFile = pathlib.Path.home() #Use the path to user's home if all else fails
-                        else:
-                            inFile = pathlib.Path(os.getcwd())
-                    else:
-                        inFile = pathlib.Path(hvsr_results['input_params']['datapath'])
-                                 
-                    if inFile.is_dir():
-                        outFile = inFile.joinpath(fname)
-                    else:
-                        outFile = inFile.with_name(fname)
-                else:
-                    if pathlib.Path(_export_path).is_dir():
-                        outFile = pathlib.Path(_export_path).joinpath(fname)
-                    else:
-                        outFile=pathlib.Path(_export_path)
-
-            if _rep_form == 'csv':
-                if outFile.exists():
-                    existFile = pd.read_csv(outFile)
-                    if csv_overwrite_opt.lower() == 'append':
-                        export_obj = pd.concat([existFile, export_obj], ignore_index=True, join='inner')
-                    elif csv_overwrite_opt.lower() == 'overwrite':
-                        pass
-                    else:# csv_overwrite_opt.lower() in ['keep', 'rename']:
-                        fileNameExists = True
-                        originalStem = outFile.stem
-                        i=1
-                        while fileNameExists:
-                            #Suffix the original stem with a counter (not the already-suffixed stem)
-                            outFile = outFile.with_stem(f"{originalStem}_{i}")
-                            i+=1
-                            if not outFile.exists():
-                                fileNameExists = False
-                try:
-                    print(f'\nSaving csv data to: {outFile}')
-                    export_obj.to_csv(outFile, index_label='ID')
-                except:
-                    warnings.warn("Report not exported. \n\tDataframe to be exported as csv has been saved in hvsr_results['BestPeak']['Report']['CSV_Report]", category=RuntimeWarning)
-            elif _rep_form =='plot':
-                if verbose:
-                    print(f'\nSaving plot to: {outFile}')
-                #Save the figure object directly; plt.savefig() would save whatever figure is current
-                export_obj.savefig(outFile)
-            return 
-
-        def report_output(_report_format, _plot_type='HVSR p ann C+ p ann Spec', _export_path=None, _no_output=False, verbose=False):
-            if _report_format=='print':
-                #Print results
-
-                #Make separators for nicely formatted print output
-                sepLen = 99
-                siteSepSymbol = '='
-                intSepSymbol = u"\u2013"
-                extSepSymbol = u"\u2014"
-                
-                if sepLen % 2 == 0:
-                    remainVal = 1
-                else:
-                    remainVal = 0
-
-                siteWhitespace = 2
-                #Format the separator lines internal to each site
-                internalSeparator = intSepSymbol.center(sepLen-4, intSepSymbol).center(sepLen, ' ')
-
-                extSiteSeparator = "".center(sepLen, extSepSymbol)
-                siteSeparator = f"{hvsr_results['input_params']['site']}".center(sepLen - siteWhitespace, ' ').center(sepLen, siteSepSymbol)
-                endSiteSeparator = "".center(sepLen, siteSepSymbol)
-
-                #Start building list to print
-                report_string_list = []
-                report_string_list.append("") #Blank line to start
-                report_string_list.append(extSiteSeparator)
-                report_string_list.append(siteSeparator)
-                report_string_list.append(extSiteSeparator)
-                #report_string_list.append(internalSeparator)
-                report_string_list.append('')
-                report_string_list.append(f"\tSite Name: {hvsr_results['input_params']['site']}")
-                report_string_list.append(f"\tAcq. Date: {hvsr_results['input_params']['acq_date']}")
-                report_string_list.append(f"\tLocation : {hvsr_results['input_params']['longitude']}, {hvsr_results['input_params']['latitude']}")
-                report_string_list.append(f"\tElevation: {hvsr_results['input_params']['elevation']}")
-                report_string_list.append('')
-                report_string_list.append(internalSeparator)
-                report_string_list.append('')
-                if 'BestPeak' not in hvsr_results.keys():
-                    report_string_list.append('\tNo identifiable BestPeak was present between {} for {}'.format(hvsr_results['input_params']['hvsr_band'], hvsr_results['input_params']['site']))
-                else:
-                    report_string_list.append('\t{0:.3f} Hz Peak Frequency'.format(hvsr_results['BestPeak']['f0']))        
-                    if curvePass and peakPass:
-                        report_string_list.append('\t  {} Curve at {} Hz passed quality checks! ☺ :D'.format(sprit_utils.check_mark(), round(hvsr_results['BestPeak']['f0'],3)))
-                    else:
-                        report_string_list.append('\t  {} Peak at {} Hz did NOT pass quality checks ☹:('.format(sprit_utils.x_mark(), round(hvsr_results['BestPeak']['f0'],3)))            
-                    report_string_list.append('')
-                    report_string_list.append(internalSeparator)
-                    report_string_list.append('')
-
-                    justSize=34
-                    #Print individual results
-                    report_string_list.append('\tCurve Tests: {}/3 passed (3/3 needed)'.format(curvTestsPassed))
-                    report_string_list.append(f"\t\t {hvsr_results['BestPeak']['Report']['Lw'][-1]}"+" Length of processing windows".ljust(justSize)+f"{hvsr_results['BestPeak']['Report']['Lw']}")
-                    report_string_list.append(f"\t\t {hvsr_results['BestPeak']['Report']['Nc'][-1]}"+" Number of significant cycles".ljust(justSize)+f"{hvsr_results['BestPeak']['Report']['Nc']}")
-                    report_string_list.append(f"\t\t {hvsr_results['BestPeak']['Report']['σ_A(f)'][-1]}"+" Small H/V StDev over time".ljust(justSize)+f"{hvsr_results['BestPeak']['Report']['σ_A(f)']}")
-
-                    report_string_list.append('')
-                    report_string_list.append("\tPeak Tests: {}/6 passed (5/6 needed)".format(peakTestsPassed))
-                    report_string_list.append(f"\t\t {hvsr_results['BestPeak']['Report']['A(f-)'][-1]}"+" Peak is prominent below".ljust(justSize)+f"{hvsr_results['BestPeak']['Report']['A(f-)']}")
-                    report_string_list.append(f"\t\t {hvsr_results['BestPeak']['Report']['A(f+)'][-1]}"+" Peak is prominent above".ljust(justSize)+f"{hvsr_results['BestPeak']['Report']['A(f+)']}")
-                    report_string_list.append(f"\t\t {hvsr_results['BestPeak']['Report']['A0'][-1]}"+" Peak is large".ljust(justSize)+f"{hvsr_results['BestPeak']['Report']['A0']}")
-                    if hvsr_results['BestPeak']['PassList']['FreqStability']:
-                        res = sprit_utils.check_mark()
-                    else:
-                        res = sprit_utils.x_mark()
-                    report_string_list.append(f"\t\t {res}"+ " Peak freq. is stable over time".ljust(justSize)+ f"{hvsr_results['BestPeak']['Report']['P-'][:5]} and {hvsr_results['BestPeak']['Report']['P+'][:-1]} {res}")
-                    report_string_list.append(f"\t\t {hvsr_results['BestPeak']['Report']['Sf'][-1]}"+" Stability of peak (Freq. StDev)".ljust(justSize)+f"{hvsr_results['BestPeak']['Report']['Sf']}")
-                    report_string_list.append(f"\t\t {hvsr_results['BestPeak']['Report']['Sa'][-1]}"+" Stability of peak (Amp. StDev)".ljust(justSize)+f"{hvsr_results['BestPeak']['Report']['Sa']}")
-                report_string_list.append('')
-                report_string_list.append(f"Calculated using {hvsr_results['hvsr_df']['Use'].sum()}/{hvsr_results['hvsr_df']['Use'].count()} time windows".rjust(sepLen-1))
-                report_string_list.append(extSiteSeparator)
-                #report_string_list.append(endSiteSeparator)
-                #report_string_list.append(extSiteSeparator)
-                report_string_list.append('')
-                
-                reportStr=''
-                #Now print it
-                for line in report_string_list:
-                    reportStr = reportStr+'\n'+line
-
-                if not _no_output:
-                    print(reportStr)
-
-                export_report(export_obj=reportStr, _export_path=_export_path, _rep_form=_report_format)
-                hvsr_results['BestPeak']['Report']['Print_Report'] = reportStr
-                hvsr_results['Print_Report'] = reportStr
-
-            elif _report_format=='csv':
-                import pandas as pd
-                pdCols = ['Site Name', 'Acq_Date', 'Longitude', 'Latitude', 'Elevation', 'PeakFrequency', 
-                        'WindowLengthFreq.','SignificantCycles','LowCurveStDevOverTime',
-                        'PeakProminenceBelow','PeakProminenceAbove','PeakAmpClarity','FreqStability', 'PeakStability_FreqStD','PeakStability_AmpStD', 'PeakPasses']
-                d = hvsr_results
-                criteriaList = []
-                for p in hvsr_results['BestPeak']["PassList"]:
-                    criteriaList.append(hvsr_results['BestPeak']["PassList"][p])
-                criteriaList.append(hvsr_results['BestPeak']["PeakPasses"])
-                dfList = [[d['input_params']['site'], d['input_params']['acq_date'], d['input_params']['longitude'], d['input_params']['latitude'], d['input_params']['elevation'], round(d['BestPeak']['f0'], 3)]]
-                dfList[0].extend(criteriaList)
-                outDF = pd.DataFrame(dfList, columns=pdCols)
-
-                if verbose:
-                    print('\nCSV Report:\n')
-                    maxColWidth = 13
-                    print('  ', end='')
-                    for col in outDF.columns:
-                        if len(str(col)) > maxColWidth:
-                            colStr = str(col)[:maxColWidth-3]+'...'
-                        else:
-                            colStr = str(col)
-                        print(colStr.ljust(maxColWidth), end='  ')
-                    print() #new line
-                    for c in range(len(outDF.columns) * (maxColWidth+2)):
-                        if c % (maxColWidth+2) == 0:
-                            print('|', end='')
-                        else:
-                            print('-', end='')
-                    print('|') #new line
-                    print('  ', end='') #Small indent at start                    
-                    for row in outDF.iterrows():
-                        for col in row[1]:
-                            if len(str(col)) > maxColWidth:
-                                colStr = str(col)[:maxColWidth-3]+'...'
-                            else:
-                                colStr = str(col)
-                            print(colStr.ljust(maxColWidth), end='  ')
-                        print()
-
-                try:
-                    export_report(export_obj=outDF, _export_path=_export_path, _rep_form=_report_format)
-                except:
-                    print("Error in exporting csv report. CSV not exported")
-                hvsr_results['BestPeak']['Report']['CSV_Report'] = outDF
-                hvsr_results['CSV_Report'] = outDF
-                        
-            elif _report_format=='plot':
-                fig_ax = plot_hvsr(hvsr_results, plot_type=_plot_type, show=False, return_fig=True)
-
-                export_report(export_obj=fig_ax[0], _export_path=_export_path, _rep_form=_report_format)
-                hvsr_results['BestPeak']['Report']['HV_Plot']=hvsr_results['HV_Plot']=fig_ax
-
-                print('\nPlot of data report:')
-                plt.show()
-                
-            return hvsr_results
-
-        for i, rep_form in enumerate(report_format):
-            if isinstance(export_path, (list, tuple)):
-                if not isinstance(report_format, (list, tuple)):
-                    warnings.warn('export_path is a list/tuple and report_format is not. This may result in unexpected behavior.')
-                if isinstance(report_format, (list, tuple)) and isinstance(export_path, (list, tuple)) and len(report_format) != len(export_path):
-                    warnings.warn('export_path and report_format are both lists or tuples, but they are not the same length. This may result in unexpected behavior.')
-            
-                exp_path = export_path[i]
-            else:
-                exp_path = export_path
-            hvsr_results = report_output(_report_format=rep_form, _plot_type=plot_type, _export_path=exp_path, _no_output=no_output, verbose=verbose)
-
-        hvsr_results['processing_parameters']['get_report'] = {}
-        for key, value in orig_args.items():
-            hvsr_results['processing_parameters']['get_report'][key] = value
-
-    return hvsr_results
-
-def gui(kind='default')
+def gui(kind='browser')

Function to open a graphical user interface (gui)

@@ -5077,90 +444,12 @@

Parameters

"widget" opens jupyter widget' "lite" open lite (pending update), by default 'default'
-
def gui(kind='default'):
-    """Function to open a graphical user interface (gui)
-
-    Parameters
-    ----------
-    kind : str, optional
-        What type of gui to open. "default" opens regular windowed interface, 
-        "widget" opens jupyter widget,
-        "lite" opens lite (pending update), by default 'default'
-
-    """
-    defaultList = ['windowed', 'window', 'default', 'd']
-    widgetList = ['widget', 'jupyter', 'notebook', 'w', 'nb']
-    liteList = ['lite', 'light', 'basic', 'l', 'b']
-
-    if kind.lower() in defaultList:
-        import pkg_resources
-        #guiPath = pathlib.Path(os.path.realpath(__file__))
-        try:
-            from sprit.sprit_gui import SPRIT_App
-        except:
-            from sprit_gui import SPRIT_App
-        
-        try:
-            import tkinter as tk
-        except:
-            if sys.platform == 'linux':
-                raise ImportError('The SpRIT graphical interface uses tkinter, which ships with python but is not pre-installed on linux machines. Use "apt-get install python-tk" or "apt-get install python3-tk" to install tkinter. You may need to use the sudo command at the start of those commands.')
-
-        def on_gui_closing():
-            plt.close('all')
-            gui_root.quit()
-            gui_root.destroy()
-
-        if sys.platform == 'linux':
-            if not pathlib.Path("/usr/share/doc/python3-tk").exists():
-                warnings.warn('The SpRIT graphical interface uses tkinter, which ships with python but is not pre-installed on linux machines. Use "apt-get install python-tk" or "apt-get install python3-tk" to install tkinter. You may need to use the sudo command at the start of those commands.')
-
-        gui_root = tk.Tk()
-        try:
-            try:
-                icon_path =pathlib.Path(pkg_resources.resource_filename(__name__, 'resources/icon/sprit_icon_alpha.ico')) 
-                gui_root.iconbitmap(icon_path)
-            except:
-                icon_path = pathlib.Path(pkg_resources.resource_filename(__name__, 'resources/icon/sprit_icon.png'))
-                gui_root.iconphoto(False, tk.PhotoImage(file=icon_path.as_posix()))
-        except Exception as e:
-            print("ICON NOT LOADED, still opening GUI")
-
-        gui_root.resizable(True, True)
-        spritApp = SPRIT_App(master=gui_root) #Open the app with a tk.Tk root
-
-        gui_root.protocol("WM_DELETE_WINDOW", on_gui_closing)    
-        gui_root.mainloop() #Run the main loop
-    elif kind.lower() in widgetList:
-        try:
-            sprit_jupyter_UI.create_jupyter_ui()
-        except Exception as e:
-            print(e)
-
def has_required_channels(stream)
-
-def has_required_channels(stream):
-    channel_set = set()
-    
-    # Extract the component codes (last character of the channel code, e.g., 'Z' of 'EHZ') from the traces in the stream
-    for trace in stream:
-        channel_set.add(trace.stats.channel[-1].upper())
-    
-    # Check if Z, E, and N components are present
-    return {'Z', 'E', 'N'}.issubset(channel_set)
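A hedged check using obspy's bundled example stream, whose three traces carry EHZ, EHN, and EHE channel codes (assumes has_required_channels from above is in scope):

import obspy

stream = obspy.read()                 # obspy's example stream: EHZ, EHN, EHE
print(has_required_channels(stream))  # True: Z, N, and E components all present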
-
def import_data(import_filepath, data_format='pickle')
@@ -5179,74 +468,29 @@

Returns

HVSRData or HVSRBatch object
 
-
def import_data(import_filepath, data_format='pickle'):
-    """Function to import .hvsr (or other extension) data exported using export_data() function
-
-    Parameters
-    ----------
-    import_filepath : str or path object
-        Filepath of file created using export_data() function. This is usually a pickle file with a .hvsr extension
-    data_format : str, default='pickle'
-        Format of the data file. Currently, only 'pickle' is supported; json or other formats may be supported eventually. By default 'pickle'.
-
-    Returns
-    -------
-    HVSRData or HVSRBatch object
-    """
-    if data_format=='pickle':
-        with open(import_filepath, 'rb') as f:
-            dataIN = pickle.load(f)
-    else:
-        dataIN = import_filepath
-    return dataIN
-
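A minimal usage sketch (the .hvsr path is illustrative):

import sprit

# Re-load results previously saved with sprit's export_data() function
hv_data = sprit.import_data('/path/to/site_results.hvsr')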
-
-def import_settings(settings_import_path, settings_import_type='instrument', verbose=False)
-
-
-
-
-Expand source code
def import_settings(settings_import_path, settings_import_type='instrument', verbose=False):
-
-    settingsDict = {}  # Ensure a dict is always defined, even if nothing is read (avoids UnboundLocalError)
-    allList = ['all', ':', 'both', 'any']
-    if settings_import_type.lower() not in allList:
-        # If just a single settings dict is desired
-        with open(settings_import_path, 'r') as f:
-            settingsDict = json.load(f)
-    else:
-        # Either a directory or list (importing multiple settings files is not yet implemented)
-        if isinstance(settings_import_path, (list, tuple)):
-            for setPath in settings_import_path:
-                pass
-        else:
-            settings_import_path = sprit_utils.checkifpath(settings_import_path)
-            if not settings_import_path.is_dir():
-                raise RuntimeError(f'settings_import_type={settings_import_type}, but settings_import_path is not list/tuple or filepath to directory')
-            else:
-                instFile = settings_import_path.glob('*.inst')
-                procFile = settings_import_path.glob('*.proc')
-    return settingsDict
-
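A minimal usage sketch (the path is illustrative; as the source above shows, only single-file import is fully implemented):

import sprit

inst_settings = sprit.import_settings('/path/to/instrument_settings.inst',
                                      settings_import_type='instrument')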
+
+def import_settings(settings_import_path, settings_import_type='instrument', verbose=False)
+
+
+
-def input_params(datapath, site='HVSR Site', network='AM', station='RAC84', loc='00', channels=['EHZ', 'EHN', 'EHE'], acq_date='2024-02-19', starttime='00:00:00.00', endtime='23:59:59.999999', tzone='UTC', xcoord=-88.2290526, ycoord=40.1012122, elevation=755, input_crs='EPSG:4326', output_crs='EPSG:4326', elev_unit='feet', depth=0, instrument='Raspberry Shake', metapath=None, hvsr_band=[1, 40], peak_freq_range=[1, 40], processing_parameters={}, verbose=False)
+def input_params(input_data, site='HVSR Site', id_prefix=None, network='AM', station='RAC84', loc='00', channels=['EHZ', 'EHN', 'EHE'], acq_date='2024-10-30', starttime=UTCDateTime(2024, 10, 30, 0, 0), endtime=UTCDateTime(2024, 10, 30, 23, 59, 59, 999999), tzone='UTC', xcoord=-88.2290526, ycoord=40.1012122, elevation=755, input_crs=None, output_crs=None, elev_unit='meters', depth=0, instrument='Raspberry Shake', metapath=None, hvsr_band=[0.4, 40], peak_freq_range=[0.4, 40], processing_parameters={}, verbose=False)

Function for designating input parameters for reading in and processing data

Parameters

-
datapath : str or pathlib.Path object
+
input_data : str or pathlib.Path object
Filepath of data. This can be a directory or file, but will need to match with what is chosen later as the source parameter in fetch_data()
site : str, default="HVSR Site"
Site name as designated by user for ease of reference. Used for plotting titles, filenames, etc.
+
id_prefix : str, default=None
+
A prefix that may be used to create unique identifiers for each site.
+The identifier created is saved as the ['HVSR_ID'] attribute of the HVSRData object,
+and is equivalent to the following formatted string:
+f"{id_prefix}-{acq_date.strftime('%Y%m%d')}-{starttime.strftime('%H%M')}-{station}".
network : str, default='AM'
The network designation of the seismometer. This is necessary for data from Raspberry Shakes. 'AM' is for Amateur network, which fits Raspberry Shakes.
station : str, default='RAC84'
@@ -5268,24 +512,24 @@

Parameters

If int, should be the int value of the UTC offset (e.g., for American Eastern Standard Time: -5). This is necessary for Raspberry Shake data in 'raw' format.
xcoord : float, default=-88.2290526
-
Longitude (or easting, or, generally, X coordinate) of data point, in Coordinate Reference System (CRS) designated by input_crs. Currently only used in csv output, but will likely be used in future for mapping/profile purposes.
+
Longitude (or easting, or, generally, X coordinate) of data point, in Coordinate Reference System (CRS) designated by input_crs. Currently only used in table output, but will likely be used in future for mapping/profile purposes.
ycoord : float, default=40.1012122
-
Latitude (or northing, or, generally, Y coordinate) of data point, in Coordinate Reference System (CRS) designated by input_crs. Currently only used in csv output, but will likely be used in future for mapping/profile purposes.
+
Latitude (or northing, or, generally, Y coordinate) of data point, in Coordinate Reference System (CRS) designated by input_crs. Currently only used in table output, but will likely be used in future for mapping/profile purposes.
input_crs : str or other format read by pyproj, default='EPSG:4326'
Coordinate reference system of input data, as used by pyproj.CRS.from_user_input()
output_crs : str or other format read by pyproj, default='EPSG:4326'
Coordinate reference system to which input data will be transformed, as used by pyproj.CRS.from_user_input()
elevation : float, default=755
-
Surface elevation of data point. Not currently used (except in csv output), but will likely be used in the future.
+
Surface elevation of data point. Not currently used (except in table output), but will likely be used in the future.
depth : float, default=0
Depth of seismometer. Not currently used, but will likely be used in the future.
-
instrument : str or list {'Raspberry Shake')
+
instrument : str {'Raspberry Shake', "Tromino"}
Instrument from which the data was acquired.
metapath : str or pathlib.Path object, default=None
Filepath of metadata, in format supported by obspy.read_inventory. If default value of None, will read from resources folder of repository (only supported for Raspberry Shake).
-
hvsr_band : list, default=[1, 40]
+
hvsr_band : list, default=[0.4, 40]
Two-element list containing low and high "corner" frequencies (in Hz) for processing. This can be specified again later.
-
peak_freq_range : list or tuple, default=[1, 40]
+
peak_freq_range : list or tuple, default=[0.4, 40]
Two-element list or tuple containing low and high frequencies (in Hz) that are used to check for HVSR Peaks. This can be a tighter range than hvsr_band, but if larger, it will still only use the hvsr_band range.
processing_parameters : dict or filepath, default={}
If filepath, should point to a .proc json file with processing parameters (i.e., an output from sprit.export_settings()).
@@ -5293,7 +537,7 @@

Parameters

If dictionary, dictionary containing nested dictionaries of function names as the key, and the parameter names/values as key/value pairs for each key.
If a function name is not present, or if a parameter name is not present, default values will be used.
For example:
-
{ 'fetch_data' : {'source':'batch', 'trim_dir':"/path/to/trimmed/data", 'export_format':'mseed', 'detrend':'spline', 'plot_input_stream':True, 'verbose':False, kwargs:{'kwargskey':'kwargsvalue'}} }
+
{ 'fetch_data' : {'source':'batch', 'data_export_path':"/path/to/trimmed/data", 'data_export_format':'mseed', 'detrend':'spline', 'plot_input_stream':True, 'verbose':False, kwargs:{'kwargskey':'kwargsvalue'}} }
verbose : bool, default=False
Whether to print output and results to terminal
@@ -5302,277 +546,45 @@

Returns

params : HVSRData
sprit.HVSRData class containing input parameters, including data file path and metadata path. This will be used as an input to other functions. If batch processing, params will be converted to batch type in fetch_data() step.
-
-Expand source code
def input_params(datapath,
-                site='HVSR Site',
-                network='AM', 
-                station='RAC84', 
-                loc='00', 
-                channels=['EHZ', 'EHN', 'EHE'],
-                acq_date=str(datetime.datetime.now().date()),
-                starttime = '00:00:00.00',
-                endtime = '23:59:59.999999',
-                tzone = 'UTC',
-                xcoord = -88.2290526,
-                ycoord =  40.1012122,
-                elevation = 755,
-                input_crs='EPSG:4326',  # 4269 is NAD83; defaulting to WGS84
-                output_crs='EPSG:4326',
-                elev_unit = 'feet',
-                depth = 0,
-                instrument = 'Raspberry Shake',
-                metapath = None,
-                hvsr_band = [1, 40],
-                peak_freq_range=[1, 40],
-                processing_parameters={},
-                verbose=False
-                ):
-    """Function for designating input parameters for reading in and processing data
-    
-    Parameters
-    ----------
-    datapath : str or pathlib.Path object
-        Filepath of data. This can be a directory or file, but will need to match with what is chosen later as the source parameter in fetch_data()
-    site : str, default="HVSR Site"
-        Site name as designated by user for ease of reference. Used for plotting titles, filenames, etc.
-    network : str, default='AM'
-        The network designation of the seismometer. This is necessary for data from Raspberry Shakes. 'AM' is for Amateur network, which fits Raspberry Shakes.
-    station : str, default='RAC84'
-        The station name of the seismometer. This is necessary for data from Raspberry Shakes.
-    loc : str, default='00'
-        Location information of the seismometer.
-    channels : list, default=['EHZ', 'EHN', 'EHE']
-        The three channels used in this analysis, as a list of strings. Preferred that Z component is first, but not necessary
-    acq_date : str, int, date object, or datetime object
-        If string, preferred format is 'YYYY-MM-DD'. 
-        If int, this will be interpreted as the day of year of the current year (e.g., 33 would be Feb 2 of the current year)
-        If date or datetime object, this will be the date. Make sure to account for time change when converting to UTC (if UTC is the following day, use the UTC day).
-    starttime : str, time object, or datetime object, default='00:00:00.00'
-        Start time of data stream. This is necessary for Raspberry Shake data in 'raw' form, or for trimming data. Format can be either 'HH:MM:SS.micros' or 'HH:MM' at minimum.
-    endtime : str, time object, or datetime object, default='23:59:59.999999'
-        End time of data stream. This is necessary for Raspberry Shake data in 'raw' form, or for trimming data. Same format as starttime.
-    tzone : str or int, default = 'UTC'
-        Timezone of input data. If string, 'UTC' will use the time as input directly. Any other string value needs to be a TZ identifier in the IANA database, a wikipedia page of these is available here: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones.
-        If int, should be the int value of the UTC offset (e.g., for American Eastern Standard Time: -5). 
-        This is necessary for Raspberry Shake data in 'raw' format.
-    xcoord : float, default=-88.2290526
-        Longitude (or easting, or, generally, X coordinate) of data point, in Coordinate Reference System (CRS) designated by input_crs. Currently only used in csv output, but will likely be used in future for mapping/profile purposes.
-    ycoord : float, default=40.1012122
-        Latitude (or northing, or, generally, Y coordinate) of data point, in Coordinate Reference System (CRS) designated by input_crs. Currently only used in csv output, but will likely be used in future for mapping/profile purposes.
-    input_crs : str or other format read by pyproj, default='EPSG:4326'
-        Coordinate reference system of input data, as used by pyproj.CRS.from_user_input()
-    output_crs : str or other format read by pyproj, default='EPSG:4326'
-        Coordinate reference system to which input data will be transformed, as used by pyproj.CRS.from_user_input()
-    elevation : float, default=755
-        Surface elevation of data point. Not currently used (except in csv output), but will likely be used in the future.
-    depth : float, default=0
-        Depth of seismometer. Not currently used, but will likely be used in the future.
-    instrument : str {'Raspberry Shake'}
-        Instrument from which the data was acquired. 
-    metapath : str or pathlib.Path object, default=None
-        Filepath of metadata, in format supported by obspy.read_inventory. If default value of None, will read from resources folder of repository (only supported for Raspberry Shake).
-    hvsr_band : list, default=[1, 40]
-        Two-element list containing low and high "corner" frequencies (in Hz) for processing. This can be specified again later.
-    peak_freq_range : list or tuple, default=[1, 40]
-        Two-element list or tuple containing low and high frequencies (in Hz) that are used to check for HVSR Peaks. This can be a tighter range than hvsr_band, but if larger, it will still only use the hvsr_band range.
-    processing_parameters : dict or filepath, default={}
-        If filepath, should point to a .proc json file with processing parameters (i.e., an output from sprit.export_settings()). 
-        Note that this only applies to parameters for the functions: 'fetch_data', 'remove_noise', 'generate_ppsds', 'process_hvsr', 'check_peaks', and 'get_report.'
-        If dictionary, dictionary containing nested dictionaries of function names as the key, and the parameter names/values as key/value pairs for each key. 
-        If a function name is not present, or if a parameter name is not present, default values will be used.
-        For example: 
-            `{ 'fetch_data' : {'source':'batch', 'trim_dir':"/path/to/trimmed/data", 'export_format':'mseed', 'detrend':'spline', 'plot_input_stream':True, 'verbose':False, kwargs:{'kwargskey':'kwargsvalue'}} }`
-    verbose : bool, default=False
-        Whether to print output and results to terminal
-
-    Returns
-    -------
-    params : sprit.HVSRData
-        sprit.HVSRData class containing input parameters, including data file path and metadata path. This will be used as an input to other functions. If batch processing, params will be converted to batch type in fetch_data() step.
-
-    """
-    orig_args = locals().copy() #Get the initial arguments
-    start_time = datetime.datetime.now()
-
-    #Reformat times
-    if type(acq_date) is datetime.datetime:
-        date = str(acq_date.date())
-    elif type(acq_date) is datetime.date:
-        date=str(acq_date)
-    elif type(acq_date) is str:
-        monthStrs = {'jan':1, 'january':1,
-                    'feb':2, 'february':2,
-                    'mar':3, 'march':3,
-                    'apr':4, 'april':4,
-                    'may':5,
-                    'jun':6, 'june':6,
-                    'jul':7, 'july':7,
-                    'aug':8, 'august':8,
-                    'sep':9, 'sept':9, 'september':9,
-                    'oct':10,'october':10, 
-                    'nov':11,'november':11,
-                    'dec':12,'december':12}
-
-        spelledMonth = False
-        for m in monthStrs.keys():
-            acq_date = acq_date.lower()
-            if m in acq_date:
-                spelledMonth = True
-                break
-
-        if spelledMonth is not False:
-            month = monthStrs[m]
-
-        if '/' in acq_date:
-            sep = '/'
-        elif '.' in acq_date:
-            sep='.'
-        elif ' ' in acq_date:
-            sep = ' '
-            acq_date = acq_date.replace(',', '')
-        else:
-            sep = '-'
-
-        acq_date = acq_date.split(sep)
-        if len(acq_date[2]) > 2: #American format
-            date = '{}-{}-{}'.format(acq_date[2], acq_date[0], acq_date[1])
-        else: #international format, one we're going to use
-            date = '{}-{}-{}'.format(acq_date[0], acq_date[1], acq_date[2])     
-
-    elif type(acq_date) is int:
-        year=datetime.datetime.today().year
-        date = str((datetime.datetime(year, 1, 1) + datetime.timedelta(acq_date - 1)).date())
-    
-    if type(starttime) is str:
-        if 'T' in starttime:
-            #date=starttime.split('T')[0]
-            starttime = starttime.split('T')[1]
-        else:
-            pass
-            #starttime = date+'T'+starttime
-    elif type(starttime) is datetime.datetime:
-        #date = str(starttime.date())
-        starttime = str(starttime.time())
-    elif type(starttime) is datetime.time:  # Compare against the type itself, not an instance
-        starttime = str(starttime)
-    
-    starttime = str(date)+"T"+str(starttime)
-    starttime = obspy.UTCDateTime(sprit_utils.format_time(starttime, tzone=tzone))
-    
-    if type(endtime) is str:
-        if 'T' in endtime:
-            date=endtime.split('T')[0]
-            endtime = endtime.split('T')[1]
-    elif type(endtime) is datetime.datetime:
-        date = str(endtime.date())
-        endtime = str(endtime.time())
-    elif type(endtime) is datetime.time:  # Compare against the type itself, not an instance
-        endtime = str(endtime)
-
-    endtime = str(date)+"T"+str(endtime)
-    endtime = obspy.UTCDateTime(sprit_utils.format_time(endtime, tzone=tzone))
-
-    acq_date = datetime.date(year=int(date.split('-')[0]), month=int(date.split('-')[1]), day=int(date.split('-')[2]))
-    raspShakeInstNameList = ['raspberry shake', 'shake', 'raspberry', 'rs', 'rs3d', 'rasp. shake', 'raspshake']
-    
-    if output_crs is None:
-        output_crs = 'EPSG:4326'
-
-    if input_crs is None:
-        input_crs = 'EPSG:4326'  # Default to WGS84
-
-    # Convert and transform coordinates even when the defaults are used
-    input_crs = CRS.from_user_input(input_crs)
-    output_crs = CRS.from_user_input(output_crs)
-
-    coord_transformer = Transformer.from_crs(input_crs, output_crs, always_xy=True)
-    xcoord, ycoord = coord_transformer.transform(xcoord, ycoord)
-
-    #Add key/values to input parameter dictionary
-    inputParamDict = {'site':site, 'net':network,'sta':station, 'loc':loc, 'cha':channels, 'instrument':instrument,
-                    'acq_date':acq_date,'starttime':starttime,'endtime':endtime, 'timezone':'UTC', #Will be in UTC by this point
-                    'longitude':xcoord,'latitude':ycoord,'elevation':elevation,'input_crs':input_crs, 'output_crs':output_crs,
-                    'depth':depth, 'datapath': datapath, 'metapath':metapath, 'hvsr_band':hvsr_band, 'peak_freq_range':peak_freq_range,
-                    'ProcessingStatus':{'InputParamsStatus':True, 'OverallStatus':True}
-                    }
-    
-    #Replace any default parameter settings with those from json file of interest, potentially
-    instrument_settings_dict = {}
-    if pathlib.Path(instrument).exists():
-        instrument_settings = import_settings(settings_import_path=instrument, settings_import_type='instrument', verbose=verbose)
-        input_params_args = inspect.getfullargspec(input_params).args
-        input_params_args.append('net')
-        input_params_args.append('sta')
-        for k, settings_value in instrument_settings.items():
-            if k in input_params_args:
-                instrument_settings_dict[k] = settings_value
-        inputParamDict['instrument_settings'] = inputParamDict['instrument']
-        inputParamDict.update(instrument_settings_dict)
-    
-    if instrument.lower() in raspShakeInstNameList:
-        if metapath is None or metapath=='':
-            metapath = pathlib.Path(pkg_resources.resource_filename(__name__, 'resources/rs3dv5plus_metadata.inv')).as_posix()
-            inputParamDict['metapath'] = metapath
-            #metapath = pathlib.Path(os.path.realpath(__file__)).parent.joinpath('/resources/rs3dv7_metadata.inv')
-
-    for settingName in instrument_settings_dict.keys():
-        if settingName in inputParamDict.keys():
-            inputParamDict[settingName] = instrument_settings_dict[settingName]
-
-    #Declare obspy here instead of at top of file for (for example) colab, where obspy first needs to be installed on environment
-    if verbose:
-        print('Gathering input parameters (input_params())')
-        for key, value in inputParamDict.items():
-            print('\t  {}={}'.format(key, value))
-        print()
-
-    if isinstance(processing_parameters, dict):
-        inputParamDict['processing_parameters'] = processing_parameters
-    else:
-        processing_parameters = sprit_utils.checkifpath(processing_parameters)
-        inputParamDict['processing_parameters'] = import_settings(processing_parameters, settings_import_type='processing', verbose=verbose)
-
-    #Format everything nicely
-    params = sprit_utils.make_it_classy(inputParamDict)
-    params['ProcessingStatus']['InputParamsStatus'] = True
-    params = _check_processing_status(params, start_time=start_time, func_name=inspect.stack()[0][3], verbose=verbose)
-    return params
-
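A minimal sketch of a typical call using the updated signature shown above (all values illustrative):

import sprit

params = sprit.input_params(
    input_data='/path/to/site_data.mseed',  # illustrative path
    site='Example Site',
    acq_date='2024-10-30',
    xcoord=-88.2290526,
    ycoord=40.1012122,
    input_crs='EPSG:4326',
    hvsr_band=[0.4, 40])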
def make_it_classy(input_data, verbose=False)
-
-Expand source code
def make_it_classy(input_data, verbose=False):
-    if isinstance(input_data, (sprit_hvsr.HVSRData, sprit_hvsr.HVSRBatch)):
-        for k, v in input_data.items():
-            if k=='input_params':
-                for kin in input_data['input_params'].keys():
-                    if kin not in input_data.keys():
-                        input_data[kin] = input_data['input_params'][kin]
-            if k=='params':
-                for kin in input_data['params'].keys():
-                    if kin not in input_data.keys():
-                        input_data[kin] = input_data['params'][kin]                
-        output_class = input_data
-    else:
-        output_class = sprit_hvsr.HVSRData(input_data)
-    if verbose:
-        print('Made it classy | {} --> {}'.format(type(input_data), type(output_class)))
-    return output_class
-
+
+
+def parse_plot_string(plot_string)
+
+
+
+
+
+def plot_azimuth(hvsr_data, fig=None, ax=None, show_azimuth_peaks=False, interpolate_azimuths=True, show_azimuth_grid=False, show_plot=True, **plot_azimuth_kwargs)
+
+
+

Function to plot azimuths when azimuths are calculated

+

Parameters

+
+
hvsr_data : HVSRData or HVSRBatch
+
HVSRData that has gone through at least the sprit.fetch_data() step, and before sprit.generate_ppsds()
+
show_azimuth_peaks : bool, optional
+
Whether to display the peak value at each azimuth calculated on the chart, by default False
+
interpolate_azimuths : bool, optional
+
Whether to interpolate the azimuth data to get a smoother plot.
+This is just for visualization; it does not change the underlying data.
+Processing the data takes a long time, but interpolation for visualization happens fairly quickly. By default True.
+
show_azimuth_grid : bool, optional
+
Whether to display the grid on the chart, by default False
+
+

Returns

+
+
matplotlib.Figure, matplotlib.Axis
+
Figure and axis of resulting azimuth plot
+
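A minimal sketch (assumes hv_results is an HVSRData object for which azimuthal H/V curves have already been calculated):

import sprit

fig, ax = sprit.plot_azimuth(hv_results,
                             show_azimuth_peaks=True,
                             show_azimuth_grid=True)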
-def plot_hvsr(hvsr_data, plot_type='HVSR ann p C+ ann p SPEC', use_subplots=True, fig=None, ax=None, return_fig=False, save_dir=None, save_suffix='', show_legend=False, show=True, close_figs=False, clear_fig=True, **kwargs)
+def plot_hvsr(hvsr_data, plot_type='HVSR ann p C+ ann p SPEC ann p', azimuth='HV', use_subplots=True, fig=None, ax=None, return_fig=False, plot_engine='matplotlib', save_dir=None, save_suffix='', show_legend=False, show_plot=True, close_figs=False, clear_fig=True, **kwargs)

Function to plot HVSR data

@@ -5581,14 +593,19 @@

Parameters

hvsr_data : dict
Dictionary containing output from process_hvsr function
-
plot_type : str or list, default = 'HVSR ann p C+ ann p SPEC'
+
plot_type : str or list, default = 'HVSR ann p C+ ann p SPEC ann p'
The plot_type of plot(s) to plot. If list, will plot all plots listed
- 'HVSR' - Standard HVSR plot, including standard deviation. Options are included below:
- 'p' shows a vertical dotted line at frequency of the "best" peak
- 'ann' annotates the frequency value of the "best" peak
- 'all' shows all the peaks identified in check_peaks() (by default, only the max is identified)
- 't' shows the H/V curve for all time windows
--'tp' shows all the peaks from the H/V curves of all the time windows
+- 'tp' shows all the peaks from the H/V curves of all the time windows
+- 'fr' shows the window within which SpRIT will search for peak frequencies, as set by peak_freq_range
+- 'test' shows a visualization of the results of the peak validity test(s). Examples:
+- 'tests' visualizes the results of all the peak tests (not the curve tests)
+- 'test12' shows the results of tests 1 and 2.
+- Append any number 1-6 after 'test' to show a specific test result visualized
- 'COMP' - plot of the PPSD curves for each individual component ("C" also works)
- '+' (as a suffix in 'C+' or 'COMP+') plots C on a plot separate from HVSR (C+ is default, but without + will plot on the same plot as HVSR)
- 'p' shows a vertical dotted line at frequency of the "best" peak
@@ -5597,7 +614,17 @@

Parameters

- 't' shows the H/V curve for all time windows
- 'SPEC' - spectrogram style plot of the H/V curve over time
- 'p' shows a horizontal dotted line at the frequency of the "best" peak
-- 'ann' annotates the frequency value of the "best" peak
+- 'ann' annotates the frequency value of the "best" peak
+- 'all' shows all the peaks identified in check_peaks()
+- 'tp' shows all the peaks of the H/V curve at all time windows
+- 'AZ' - circular plot of calculated azimuthal HV curves, similar in style to SPEC plot.
+- 'p' shows a point at each calculated (not interpolated) azimuth peak
+- 'g' shows grid lines at various angles
+- 'i' interpolates so that there is an interpolated azimuth at each degree interval (1 degree step). This is the default, so usually 'i' is not needed.
+- '-i' prohibits interpolation (only shows the calculated azimuths, as determined by azimuth_angle (default = 30))
+
azimuth : str, default = 'HV'
+
Which azimuth to plot, the default 'HV' being the standard combined N and E components
use_subplots : bool, default = True
Whether to output the plots as subplots (True) or as separate plots (False)
fig : matplotlib.Figure, default = None
@@ -5606,13 +633,15 @@

Parameters

If not None, matplotlib axis on which plot is plotted
return_fig : bool
Whether to return figure and axis objects
+
plot_engine : str, default='Matplotlib'
+
Which engine to use for plotting. Both "matplotlib" and "plotly" are acceptable. For shorthand, 'mpl', 'm' also work for matplotlib; 'plty' or 'p' also work for plotly. Not case sensitive.
save_dir : str or None
Directory in which to save figures
save_suffix : str
Suffix to add to end of figure filename(s), if save_dir is used
show_legend : bool, default=False
Whether to show legend in plot
-
show : bool
+
show_plot : bool
Whether to show plot
close_figs : bool, default=False
Whether to close figures before plotting
@@ -5626,185 +655,27 @@

Returns

fig, ax : matplotlib figure and axis objects
Returns figure and axis matplotlib.pyplot objects if return_fig=True, otherwise, simply plots the figures
-
-Expand source code
def plot_hvsr(hvsr_data, plot_type='HVSR ann p C+ ann p SPEC', use_subplots=True, fig=None, ax=None, return_fig=False,  save_dir=None, save_suffix='', show_legend=False, show=True, close_figs=False, clear_fig=True,**kwargs):
-    """Function to plot HVSR data
-
-    Parameters
-    ----------
-    hvsr_data : dict                  
-        Dictionary containing output from process_hvsr function
-    plot_type : str or list, default = 'HVSR ann p C+ ann p SPEC'
-        The plot_type of plot(s) to plot. If list, will plot all plots listed
-        - 'HVSR' - Standard HVSR plot, including standard deviation. Options are included below:
-            - 'p' shows a vertical dotted line at frequency of the "best" peak
-            - 'ann' annotates the frequency value of the "best" peak
-            - 'all' shows all the peaks identified in check_peaks() (by default, only the max is identified)
-            - 't' shows the H/V curve for all time windows
-                -'tp' shows all the peaks from the H/V curves of all the time windows
-        - 'COMP' - plot of the PPSD curves for each individual component ("C" also works)
-            - '+' (as a suffix in 'C+' or 'COMP+') plots C on a plot separate from HVSR (C+ is default, but without + will plot on the same plot as HVSR)
-            - 'p' shows a vertical dotted line at frequency of the "best" peak
-            - 'ann' annotates the frequency value of the "best" peak
-            - 'all' shows all the peaks identified in check_peaks() (by default, only the max is identified)
-            - 't' shows the H/V curve for all time windows
-        - 'SPEC' - spectrogram style plot of the H/V curve over time
-            - 'p' shows a horizontal dotted line at the frequency of the "best" peak
-            - 'ann' annotates the frequency value of the "best" peak
-    use_subplots : bool, default = True
-        Whether to output the plots as subplots (True) or as separate plots (False)
-    fig : matplotlib.Figure, default = None
-        If not None, matplotlib figure on which plot is plotted
-    ax : matplotlib.Axis, default = None
-        If not None, matplotlib axis on which plot is plotted
-    return_fig : bool
-        Whether to return figure and axis objects
-    save_dir : str or None
-        Directory in which to save figures
-    save_suffix : str
-        Suffix to add to end of figure filename(s), if save_dir is used
-    show_legend : bool, default=False
-        Whether to show legend in plot
-    show : bool
-        Whether to show plot
-    close_figs : bool, default=False
-        Whether to close figures before plotting
-    clear_fig : bool, default=True
-        Whether to clear figures before plotting
-    **kwargs : keyword arguments
-        Keyword arguments for matplotlib.pyplot
-
-    Returns
-    -------
-    fig, ax : matplotlib figure and axis objects
-        Returns figure and axis matplotlib.pyplot objects if return_fig=True, otherwise, simply plots the figures
-    """
-    orig_args = locals().copy() #Get the initial arguments
-    if isinstance(hvsr_data, HVSRBatch):
-        #If running batch, we'll loop through each site
-        for site_name in hvsr_data.keys():
-            args = orig_args.copy() #Make a copy so we don't accidentally overwrite
-            individual_params = hvsr_data[site_name] #Get what would normally be the "params" variable for each site
-            args['hvsr_results'] = individual_params #reset the params parameter we originally read in to an individual site params
-            if hvsr_data[site_name]['ProcessingStatus']['OverallStatus']:
-                try:
-                    _hvsr_plot_batch(**args) #Call another function, that lets us run this function again
-                except:
-                    print(f"{site_name} not able to be plotted.")
-    else:
-        if clear_fig and fig is not None and ax is not None: #Intended use for tkinter
-            #Clear everything
-            for key in ax:
-                ax[key].clear()
-            fig.texts.clear()  # 'del t' in a loop only unbinds the loop variable; clear the list directly
-            fig.clear()
-        if close_figs:
-            plt.close('all')
-
-        compList = ['c', 'comp', 'component', 'components']
-        specgramList = ['spec', 'specgram', 'spectrogram']
-        hvsrList = ['hvsr', 'hv', 'h']
-
-        hvsrInd = np.nan
-        compInd = np.nan
-        specInd = np.nan
-
-        kList = plot_type.split(' ')
-        for i, k in enumerate(kList):
-            kList[i] = k.lower()
-
-        #Get the plots in the right order, no matter how they were input (and ensure the right options go with the right plot)
-        #HVSR index
-        if len(set(hvsrList).intersection(kList)):
-            for i, hv in enumerate(hvsrList):
-                if hv in kList:
-                    hvsrInd = kList.index(hv)
-                    break
-        #Component index
-        #if len(set(compList).intersection(kList)):
-        for i, c in enumerate(kList):
-            if '+' in c and c[:-1] in compList:
-                compInd = kList.index(c)
-                break
-            
-        #Specgram index
-        if len(set(specgramList).intersection(kList)):
-            for i, sp in enumerate(specgramList):
-                if sp in kList:
-                    specInd = kList.index(sp)
-                    break        
-
-        indList = [hvsrInd, compInd, specInd]
-        indListCopy = indList.copy()
-        plotTypeList = ['hvsr', 'comp', 'spec']
-
-        plotTypeOrder = []
-        plotIndOrder = []
-
-        lastVal = 0
-        while lastVal != 99:
-            firstInd = np.nanargmin(indListCopy)
-            plotTypeOrder.append(plotTypeList[firstInd])
-            plotIndOrder.append(indList[firstInd])
-            lastVal = indListCopy[firstInd]
-            indListCopy[firstInd] = 99 #just a high number
-
-        plotTypeOrder.pop()
-        plotIndOrder[-1]=len(kList)
-        
-        for i, p in enumerate(plotTypeOrder):
-            pStartInd = plotIndOrder[i]
-            pEndInd = plotIndOrder[i+1]
-            plotComponents = kList[pStartInd:pEndInd]
-
-            if use_subplots and i==0 and fig is None and ax is None:
-                mosaicPlots = []
-                for pto in plotTypeOrder:
-                    mosaicPlots.append([pto])
-                fig, ax = plt.subplot_mosaic(mosaicPlots, gridspec_kw={'hspace':0.3})
-                axis = ax[p]
-            elif use_subplots:
-                with warnings.catch_warnings():
-                    warnings.simplefilter("ignore") #Often warns about xlim when it is not an issue
-                    ax[p].clear()
-                axis = ax[p]
-            else:
-                fig, axis = plt.subplots()
-                    
-            if p == 'hvsr':
-                kwargs['p'] = 'hvsr'
-                _plot_hvsr(hvsr_data, fig=fig, ax=axis, plot_type=plotComponents, xtype='x_freqs', show_legend=show_legend, axes=ax, **kwargs)
-            elif p=='comp':
-                plotComponents[0] = plotComponents[0][:-1]
-                kwargs['p'] = 'comp'  # Assignment, not comparison
-                _plot_hvsr(hvsr_data, fig=fig, ax=axis, plot_type=plotComponents, xtype='x_freqs', show_legend=show_legend, axes=ax, **kwargs)
-            elif p=='spec':
-                plottypeKwargs = {}
-                for c in plotComponents:
-                    plottypeKwargs[c] = True
-                kwargs.update(plottypeKwargs)
-                _plot_specgram_hvsr(hvsr_data, fig=fig, ax=axis, colorbar=False, **kwargs)
-            else:
-                warnings.warn(f'Plot type {p} not recognized', UserWarning)   
-
-        windowsUsedStr = f"{hvsr_data['hvsr_df']['Use'].sum()}/{hvsr_data['hvsr_df'].shape[0]} windows used"
-        fig.text(x=0.98, y=0.02, s=windowsUsedStr, ha='right', va='bottom', fontsize='x-small',
-                 bbox=dict(facecolor='w', edgecolor=None, linewidth=0, alpha=1, pad=9))
-
-        if show:
-            fig.canvas.draw()
-            
-        if return_fig:
-            return fig, ax
-    return
-
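A minimal sketch of plotting processed results (assumes hv_results holds output from sprit.process_hvsr() or an equivalent run):

import sprit

fig, ax = sprit.plot_hvsr(hv_results,
                          plot_type='HVSR ann p C+ ann p SPEC ann p',
                          show_legend=True,
                          return_fig=True)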
+ +
+def plot_outlier_curves(hvsr_data, plot_engine='plotly', rmse_thresh=0.98, use_percentile=True, use_hv_curve=False, from_roc=False, show_plot=True, verbose=False)
+
+
+
+
+
+def plot_preview(hv_data, stream=None, preview_fig=None, spectrogram_component='Z', show_plot=True, return_fig=False)
+
+
+
+
+
+def plot_results(hv_data, plot_string='HVSR p ann C+ p SPEC ann', results_fig=None, results_graph_widget=None, return_fig=False, show_results_plot=True)
+
+
+
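The three plotting helpers above are not documented here; based only on the signature shown, a hedged sketch of plot_results (hv_results assumed to be processed HVSRData):

import sprit

results_fig = sprit.plot_results(hv_results,
                                 plot_string='HVSR p ann C+ p SPEC ann',
                                 return_fig=True)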
-def process_hvsr(hvsr_data, method=3, smooth=True, freq_smooth='konno ohmachi', f_smooth_width=40, resample=True, outlier_curve_rmse_percentile=False, verbose=False)
+def process_hvsr(hvsr_data, horizontal_method=None, smooth=True, freq_smooth='konno ohmachi', f_smooth_width=40, resample=True, outlier_curve_rmse_percentile=False, azimuth=None, verbose=False)

Process the input data and get HVSR data

@@ -5815,16 +686,26 @@

Parameters

hvsr_data : HVSRData or HVSRBatch
Data object containing all the parameters input and generated by the user (usually, during sprit.input_params(), sprit.fetch_data(), sprit.generate_ppsds() and/or sprit.remove_noise()).
-
method
+
horizontal_method : int or str, default=3
-
Method to use for combining the horizontal components
+
Method to use for combining the horizontal components. Default is 3 (Geometric Mean).
0) (not used)
-
1) Diffuse field assumption, or 'DFA' (not currently implemented)
-
2) 'Arithmetic Mean': H ≡ (HN + HE)/2
-
3) 'Geometric Mean': H ≡ √(HN · HE), recommended by the SESAME project (2004)
-
4) 'Vector Summation': H ≡ √(HN^2 + HE^2)
-
5) 'Quadratic Mean': H ≡ √((HN^2 + HE^2)/2)
-
6) 'Maximum Horizontal Value': H ≡ max {HN, HE}
+
1) 'Diffuse field assumption'
+H = √( (eie_E + eie_N) / eie_Z ), where eie = equal interval energy
+
2) 'Arithmetic Mean'
+H ≡ (HN + HE)/2
+
3) 'Geometric Mean'
+H ≡ √(HN · HE), recommended by the SESAME project (2004)
+
4) 'Vector Summation'
+H ≡ √(HN^2 + HE^2)
+
5) 'Quadratic Mean'
+H ≡ √((HN^2 + HE^2)/2)
+
6) 'Maximum Horizontal Value'
+H ≡ max {HN, HE}
+
7) 'Minimum Horizontal Value'
+H ≡ min {HN, HE}
+
8) 'Single Azimuth'
+H = H2·cos(az) + H1·sin(az)
smooth : bool, default=True
bool or int may be used.
@@ -5853,6 +734,8 @@

Parameters

If False, outlier curve removal is not carried out here. If True, defaults to 98 (98th percentile). Otherwise, float of percentile used as rmse_thresh of remove_outlier_curve().
+
azimuth : float, default = None
+
The azimuth angle to use when horizontal_method is 'Single Azimuth'.
verbose : bool, default=False
Whether to print output to terminal
@@ -5860,457 +743,21 @@

Returns

hvsr_out    : dict
     Dictionary containing all the information about the data, including input parameters
 
-
-Expand source code
def process_hvsr(hvsr_data, method=3, smooth=True, freq_smooth='konno ohmachi', f_smooth_width=40, resample=True, outlier_curve_rmse_percentile=False, verbose=False):
-    """Process the input data and get HVSR data
-    
-    This is the main function that uses other (private) functions to do 
-    the bulk of processing of the HVSR data and the data quality checks.
-
-    Parameters
-    ----------
-    hvsr_data  : HVSRData or HVSRBatch
-        Data object containing all the parameters input and generated by the user (usually, during sprit.input_params(), sprit.fetch_data(), sprit.generate_ppsds() and/or sprit.remove_noise()).
-    method  : int or str, default=3
-        Method to use for combining the horizontal components
-            0) (not used)
-            1) Diffuse field assumption, or 'DFA' (not currently implemented)
-            2) 'Arithmetic Mean': H ≡ (HN + HE)/2
-            3) 'Geometric Mean': H ≡ √(HN · HE), recommended by the SESAME project (2004)
-            4) 'Vector Summation': H ≡ √(HN^2 + HE^2)
-            5) 'Quadratic Mean': H ≡ √((HN^2 + HE^2)/2)
-            6) 'Maximum Horizontal Value': H ≡ max {HN, HE}
-    smooth  : bool, default=True
-        bool or int may be used. 
-            If True, default to smooth H/V curve to using savgoy filter with window length of 51 (works well with default resample of 1000 pts)
-            If int, the length of the window in the savgoy filter.
-    freq_smooth : str {'konno ohmachi', 'constant', 'proportional'}
-        Which frequency smoothing method to use. By default, uses the 'konno ohmachi' method.
-            - The Konno & Ohmachi method uses the obspy.signal.konnoohmachismoothing.konno_ohmachi_smoothing() function: https://docs.obspy.org/packages/autogen/obspy.signal.konnoohmachismoothing.konno_ohmachi_smoothing.html
-            - The constant method uses a window of constant length f_smooth_width
-            - The proportional method uses a window sized as a percentage of the number of frequency steps/range (f_smooth_width now refers to percentage)
-        See here for more information: https://www.geopsy.org/documentation/geopsy/hv-processing.html
-    f_smooth_width : int, default = 40
-        - For 'konno ohmachi': passed directly to the bandwidth parameter of the konno_ohmachi_smoothing() function, determines the width of the smoothing peak, with lower values resulting in broader peak. Must be > 0.
-        - For 'constant': the size of a triangular smoothing window in the number of frequency steps
-        - For 'proportional': the size of a triangular smoothing window in percentage of the number of frequency steps (e.g., if 1000 frequency steps/bins and f_smooth_width=40, window would be 400 steps wide)
-    resample  : bool, default = True
-        bool or int. 
-            If True, default to resample H/V data to include 1000 frequency values for the rest of the analysis
-            If int, the number of data points to interpolate/resample/smooth the component psd/HV curve data to.
-    outlier_curve_rmse_percentile : bool, float, default = False
-        If False, outlier curve removal is not carried out here. 
-        If True, defaults to 98 (98th percentile). 
-        Otherwise, float of percentile used as rmse_thresh of remove_outlier_curve().
-    verbose : bool, default=False
-        Whether to print output to terminal
-
-    Returns
-    -------
-        hvsr_out    : dict
-            Dictionary containing all the information about the data, including input parameters
-
-    """
-    orig_args = locals().copy() #Get the initial arguments
-    start_time = datetime.datetime.now()
-
-    # Update with processing parameters specified previously in input_params, if applicable
-    if 'processing_parameters' in hvsr_data.keys():
-        if 'process_hvsr' in hvsr_data['processing_parameters'].keys():
-            for k, v in hvsr_data['processing_parameters']['process_hvsr'].items():
-                defaultVDict = dict(zip(inspect.getfullargspec(process_hvsr).args[1:], 
-                                        inspect.getfullargspec(process_hvsr).defaults))
-                # Manual input to function overrides the imported parameter values
-                if (not isinstance(v, (HVSRData, HVSRBatch))) and (k in orig_args.keys()) and (orig_args[k]==defaultVDict[k]):
-                    orig_args[k] = v
-                    
-    method = orig_args['method']
-    smooth = orig_args['smooth']
-    freq_smooth = orig_args['freq_smooth']
-    f_smooth_width = orig_args['f_smooth_width']
-    resample = orig_args['resample']
-    outlier_curve_rmse_percentile = orig_args['outlier_curve_rmse_percentile']
-    verbose = orig_args['verbose']
-
-    if (verbose and isinstance(hvsr_data, HVSRBatch)) or (verbose and not hvsr_data['batch']):
-        if isinstance(hvsr_data, HVSRData) and hvsr_data['batch']:
-            pass
-        else:
-            print('\nCalculating Horizontal/Vertical Ratios at all frequencies/time steps (process_hvsr())')
-            print('\tUsing the following parameters:')
-            for key, value in orig_args.items():
-                if key=='hvsr_data':
-                    pass
-                else:
-                    print('\t  {}={}'.format(key, value))
-            print()
-
-    #First, divide up for batch or not
-    #Site is in the keys anytime it's not batch
-    if isinstance(hvsr_data, HVSRBatch):
-        #If running batch, we'll loop through each site
-        hvsr_out = {}
-        for site_name in hvsr_data.keys():
-            args = orig_args.copy() #Make a copy so we don't accidentally overwrite
-            args['hvsr_data'] = hvsr_data[site_name] #Get what would normally be the "hvsr_data" variable for each site
-            if hvsr_data[site_name]['ProcessingStatus']['OverallStatus']:
-                try:
-                    hvsr_out[site_name] = _process_hvsr_batch(**args) #Call another function, that lets us run this function again
-                except:
-                    hvsr_out = hvsr_data
-                    hvsr_out[site_name]['ProcessingStatus']['HVStatus']=False
-                    hvsr_out[site_name]['ProcessingStatus']['OverallStatus'] = False                    
-            else:
-                hvsr_out = hvsr_data
-                hvsr_out[site_name]['ProcessingStatus']['HVStatus']=False
-                hvsr_out[site_name]['ProcessingStatus']['OverallStatus'] = False
-        hvsr_out = HVSRBatch(hvsr_out)
-    else:
-        ppsds = hvsr_data['ppsds'].copy()#[k]['psd_values']
-        ppsds = sprit_utils.check_xvalues(ppsds)
-
-        methodList = ['<placeholder_0>', 'Diffuse Field Assumption', 'Arithmetic Mean', 'Geometric Mean', 'Vector Summation', 'Quadratic Mean', 'Maximum Horizontal Value']
-        x_freqs = {}
-        x_periods = {}
-
-        psdValsTAvg = {}
-        stDev = {}
-        stDevValsP = {}
-        stDevValsM = {}
-        psdRaw={}
-        currTimesUsed={}
-        hvsrDF = hvsr_data['hvsr_df']
-
-        def move_avg(y, box_pts):
-            #box = np.ones(box_pts)/box_pts
-            box = np.hanning(box_pts)
-            y_smooth = np.convolve(y, box, mode='same') / sum(box)
-            return y_smooth
-
-        for k in ppsds.keys():
-            #input_ppsds = ppsds[k]['psd_values'] #original, not used anymore
-            input_ppsds = np.stack(hvsrDF['psd_values_'+k].values)
-
-            currPPSDs = hvsrDF['psd_values_'+k][hvsrDF['Use']].values
-            used_ppsds = np.stack(currPPSDs)
-            
-            #if resampling has been selected
-            if resample is True or isinstance(resample, (int, float)):
-                if resample is True:
-                    resample = 1000 #Default smooth value
-
-                #xValMin = min(ppsds[k]['period_bin_centers'])
-                #xValMax = max(ppsds[k]['period_bin_centers'])
-                xValMin = 1/hvsr_data['hvsr_band'][1]
-                xValMax = 1/hvsr_data['hvsr_band'][0]
-                #Resample period bin values
-                x_periods[k] = np.logspace(np.log10(xValMin), np.log10(xValMax), num=resample)
-                if smooth or isinstance(smooth, (int, float)):
-                    if smooth is True:  # Only replace with the default when True; keep user-specified ints
-                        smooth = 51 #Default smoothing window
-                        padVal = 25
-                    elif smooth % 2==0:
-                        smooth += 1 #Otherwise, needs to be odd
-                        padVal = smooth//2
-                        if padVal %2==0:
-                            padVal += 1
-
-                #Resample raw ppsd values
-                for i, ppsd_t in enumerate(input_ppsds):
-                    if i==0:
-                        psdRaw[k] = np.interp(x_periods[k], ppsds[k]['period_bin_centers'], ppsd_t)
-                        if smooth is not False:
-                            padRawKPad = np.pad(psdRaw[k], [padVal, padVal], mode='reflect')
-                            #padRawKPadSmooth = scipy.signal.savgol_filter(padRawKPad, smooth, 3)
-                            padRawKPadSmooth = move_avg(padRawKPad, smooth)
-                            psdRaw[k] = padRawKPadSmooth[padVal:-padVal]
-
-                    else:
-                        psdRaw[k] = np.vstack((psdRaw[k], np.interp(x_periods[k], ppsds[k]['period_bin_centers'], ppsd_t)))
-                        if smooth is not False:
-                            padRawKiPad = np.pad(psdRaw[k][i], [padVal, padVal], mode='reflect')
-                            #padRawKiPadSmooth = scipy.signal.savgol_filter(padRawKiPad, smooth, 3)
-                            padRawKiPadSmooth = move_avg(padRawKiPad, smooth)
-                            psdRaw[k][i] = padRawKiPadSmooth[padVal:-padVal]
-
-            else:
-                #If no resampling desired
-                #x_periods[k] = np.array(ppsds[k]['period_bin_centers'])
-                x_periods[k] = np.round([1/p for p in hvsr_data['ppsds'][k]['period_xedges'][:-1]],3)
-                x_periods[k][0] = hvsr_data['hvsr_band'][1]
-                x_periods[k][-1] = hvsr_data['hvsr_band'][0]
-                psdRaw[k] = np.array(input_ppsds)
-
-            hvsrDF['psd_values_'+k] = list(psdRaw[k])
-
-            #Get average psd value across time for each channel (used to calc main H/V curve)
-            psdValsTAvg[k] = np.nanmean(np.array(psdRaw[k]), axis=0)
-            x_freqs[k] = np.array([1/p for p in x_periods[k]]) #np.divide(np.ones_like(x_periods[k]), x_periods[k]) 
-            stDev[k] = np.std(psdRaw[k], axis=0)
-            stDevValsM[k] = np.array(psdValsTAvg[k] - stDev[k])
-            stDevValsP[k] = np.array(psdValsTAvg[k] + stDev[k])
-
-            currTimesUsed[k] = np.array(hvsrDF['TimesProcessed_Obspy'][hvsrDF['Use']].values)
-            #currTimesUsed[k] = ppsds[k]['current_times_used'] #original one
-        
-        #Get string of method type
-        if type(method) is int:
-            methodInt = method
-            method = methodList[method]
-        hvsr_data['method'] = method
-
-        #This gets the main hvsr curve averaged from all time steps
-        anyK = list(x_freqs.keys())[0]
-        hvsr_curve, _ = __get_hvsr_curve(x=x_freqs[anyK], psd=psdValsTAvg, method=methodInt, hvsr_data=hvsr_data, verbose=verbose)
-        origPPSD = hvsr_data['ppsds_obspy'].copy()
-
-
-        #Add some other variables to our output dictionary
-        hvsr_dataUpdate = {'input_params':hvsr_data,
-                    'x_freqs':x_freqs,
-                    'hvsr_curve':hvsr_curve,
-                    'x_period':x_periods,
-                    'psd_raw':psdRaw,
-                    'current_times_used': currTimesUsed,
-                    'psd_values_tavg':psdValsTAvg,
-                    'ppsd_std':stDev,
-                    'ppsd_std_vals_m':stDevValsM,
-                    'ppsd_std_vals_p':stDevValsP,
-                    'method':method,
-                    'ppsds':ppsds,
-                    'ppsds_obspy':origPPSD,
-                    'tsteps_used': hvsr_data['tsteps_used'].copy(),
-                    'hvsr_df':hvsr_data['hvsr_df']
-                    }
-        
-        hvsr_out = HVSRData(hvsr_dataUpdate)
-
-        #This is if manual editing was used (should probably be updated at some point to just use masks)
-        if 'xwindows_out' in hvsr_data.keys():
-            hvsr_out['xwindows_out'] = hvsr_data['xwindows_out']
-        else:
-            hvsr_out['xwindows_out'] = []
-
-
-        freq_smooth_ko = ['konno ohmachi', 'konno-ohmachi', 'konnoohmachi', 'konnohmachi', 'ko', 'k']
-        freq_smooth_constant = ['constant', 'const', 'c']
-        freq_smooth_proport = ['proportional', 'proportion', 'prop', 'p']
-
-        #Frequency Smoothing
-        if not freq_smooth:
-            if verbose:
-                warnings.warn('No frequency smoothing is being applied. This is not recommended for noisy datasets.')
-        elif freq_smooth is True or freq_smooth.lower() in freq_smooth_ko:
-            from obspy.signal import konnoohmachismoothing
-            for k in hvsr_out['psd_raw']:
-                colName = f'psd_values_{k}'
-
-                ppsd_data = np.stack(hvsr_out['hvsr_df'][colName])
-                ppsd_data = hvsr_out['psd_raw'][k]
-
-
-                freqs = hvsr_out['x_freqs'][k]
-                padding_length = int(f_smooth_width)
-
-                padding_value_R = np.nanmean(ppsd_data[:,-1*padding_length:])
-                padding_value_L = np.nanmean(ppsd_data[:,:padding_length])
-
-                # Pad the data to prevent boundary anomalies
-                padded_ppsd_data = np.pad(ppsd_data, ((0, 0), (padding_length, padding_length)), 
-                                          'constant', constant_values=(padding_value_L, padding_value_R))
-
-                # Pad the frequencies
-                ratio = freqs[1] / freqs[0]
-                # Generate new elements on either side and combine
-                left_padding = [freqs[0] / (ratio ** i) for i in range(padding_length, 0, -1)]
-                right_padding = [freqs[-1] * (ratio ** i) for i in range(1, padding_length + 1)]
-                padded_freqs = np.concatenate([left_padding, freqs, right_padding])
-                
-                #Filter out UserWarning for just this method, since it throws up a UserWarning that doesn't really matter about dtypes often
-                with warnings.catch_warnings():
-                    warnings.simplefilter('ignore', category=UserWarning)
-                    smoothed_ppsd_data = konnoohmachismoothing.konno_ohmachi_smoothing(padded_ppsd_data, 
-                                                    padded_freqs, bandwidth=f_smooth_width, normalize=True)
-                
-                # Trim the padding back off so only the original frequency range remains
-                smoothed_ppsd_data = smoothed_ppsd_data[:,padding_length:-1*padding_length]
-                hvsr_out['psd_raw'][k] = smoothed_ppsd_data
-                hvsr_out['hvsr_df'][colName] = pd.Series(list(smoothed_ppsd_data), index=hvsr_out['hvsr_df'].index)
-
-        elif freq_smooth.lower() in freq_smooth_constant:
-            hvsr_out = __freq_smooth_window(hvsr_out, f_smooth_width, kind_freq_smooth='constant')
-        elif freq_smooth.lower() in freq_smooth_proport:
-            hvsr_out = __freq_smooth_window(hvsr_out, f_smooth_width, kind_freq_smooth='proportional')
-        else:
-            if verbose:
-                warnings.warn(f'You indicated no frequency smoothing should be applied (freq_smooth = {freq_smooth}). This is not recommended for noisy datasets.')
-
-        #Get hvsr curve from three components at each time step
-        anyK = list(hvsr_out['psd_raw'].keys())[0]
-        if method==1 or method =='dfa' or method =='Diffuse Field Assumption':
-            pass  # DFA (diffuse field assumption) processing is not yet implemented here
-        else:
-            hvsr_tSteps = []
-            for tStep in range(len(hvsr_out['psd_raw'][anyK])):
-                tStepDict = {}
-                for k in hvsr_out['psd_raw']:
-                    tStepDict[k] = hvsr_out['psd_raw'][k][tStep]
-                hvsr_tstep, _ = __get_hvsr_curve(x=hvsr_out['x_freqs'][anyK], psd=tStepDict, method=methodInt, hvsr_data=hvsr_out, verbose=verbose)
-                hvsr_tSteps.append(np.float32(hvsr_tstep)) #Add hvsr curve for each time step to larger list of arrays with hvsr_curves
-        hvsr_out['hvsr_df']['HV_Curves'] = hvsr_tSteps
-
-        hvsr_out['ind_hvsr_curves'] = np.stack(hvsr_out['hvsr_df']['HV_Curves'][hvsr_out['hvsr_df']['Use']])
-        #hvsr_out['ind_hvsr_curves'] = np.array(hvsr_tSteps)
-
-        #Initialize array based only on the curves we are currently using
-        indHVCurvesArr = np.stack(hvsr_out['hvsr_df']['HV_Curves'][hvsr_out['hvsr_df']['Use']])
-        #indHVCurvesArr = hvsr_out['ind_hvsr_curves']
-
-        if outlier_curve_rmse_percentile:
-            if outlier_curve_rmse_percentile is True:
-                outlier_curve_rmse_percentile = 98
-            hvsr_out = remove_outlier_curves(hvsr_out, use_percentile=True, rmse_thresh=outlier_curve_rmse_percentile, use_hv_curve=True, verbose=verbose)
-  
-        hvsr_out['ind_hvsr_stdDev'] = np.nanstd(indHVCurvesArr, axis=0)
-
-        #Get peaks for each time step
-        tStepPeaks = []
-        for tStepHVSR in hvsr_tSteps:
-            tStepPeaks.append(__find_peaks(tStepHVSR))
-        hvsr_out['ind_hvsr_peak_indices'] = tStepPeaks
-        hvsr_out['hvsr_df']['CurvesPeakIndices'] = tStepPeaks
-
-        tStepPFList = []
-        for tPeaks in tStepPeaks:
-            tStepPFs = []
-            for pInd in tPeaks:
-                tStepPFs.append(np.float32(hvsr_out['x_freqs'][anyK][pInd]))
-            tStepPFList.append(tStepPFs)
-        hvsr_out['hvsr_df']['CurvesPeakFreqs'] = tStepPFList
-
-        #Get peaks of main HV curve
-        hvsr_out['hvsr_peak_indices'] = __find_peaks(hvsr_out['hvsr_curve'])
-        
-        #Get frequency values at HV peaks in main curve
-        hvsrPF=[]
-        for p in hvsr_out['hvsr_peak_indices']:
-            hvsrPF.append(hvsr_out['x_freqs'][anyK][p])
-        hvsr_out['hvsr_peak_freqs'] = np.array(hvsrPF)
-
-
-        #Get other HVSR parameters (i.e., standard deviations, etc.)
-        hvsr_out = __gethvsrparams(hvsr_out)
-
-        #Include the original obspy stream in the output
-        hvsr_out['input_stream'] = hvsr_dataUpdate['input_params']['input_stream'] #input_stream
-        hvsr_out = sprit_utils.make_it_classy(hvsr_out)
-        hvsr_out['ProcessingStatus']['HVStatus'] = True
-
-        if 'processing_parameters' not in hvsr_out.keys():
-            hvsr_out['processing_parameters'] = {}
-        hvsr_out['processing_parameters']['process_hvsr'] = {}
-        for key, value in orig_args.items():
-            hvsr_out['processing_parameters']['process_hvsr'][key] = value
-
-    hvsr_out = _check_processing_status(hvsr_out, start_time=start_time, func_name=inspect.stack()[0][3], verbose=verbose)
-
-    return hvsr_out
-
def read_from_RS(dest, src='SHAKENAME@HOSTNAME:/opt/data/archive/YEAR/AM/STATION/', opts='az', username='myshake', password='shakeme', hostname='rs.local', year='2023', sta='RAC84', sleep_time=0.1, verbose=True, save_progress=True, method='scp')
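
For orientation, here is a minimal usage sketch based only on the signature above; the destination path is a hypothetical placeholder, and the credentials shown are simply the signature defaults. Note that read_from_RS() lives in the sprit.sprit_hvsr module.

from sprit import sprit_hvsr

# Hypothetical usage sketch; the destination folder is a placeholder
dest = sprit_hvsr.read_from_RS(
    dest='/data/shake_archive/',  # local folder to copy the archive into (hypothetical)
    username='myshake',           # signature defaults for a Raspberry Shake
    password='shakeme',
    hostname='rs.local',
    year='2023',
    sta='RAC84',
    method='rsync',               # 'scp' is the signature default
)
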
-
def read_from_RS(dest, src='SHAKENAME@HOSTNAME:/opt/data/archive/YEAR/AM/STATION/', opts='az', username='myshake', password='shakeme',hostname='rs.local', year='2023', sta='RAC84',sleep_time=0.1, verbose=True, save_progress=True, method='scp'):
-    src = src.replace('SHAKENAME', username)
-    src = src.replace('HOSTNAME', hostname)
-    src = src.replace('YEAR', year)
-    src = src.replace('STATION', sta)
-
-    if method == 'scp':
-        """This does not work from within a virtual environment!!!!"""
-        #import pexpect
-        import sys
-        #from pexpect import popen_spawn
-        import time
-        import wexpect
-
-        scp_command = 'scp -r {} "{}"'.format(src, dest)
-
-        print('Command:', scp_command)
-        child = wexpect.spawn(scp_command, timeout=5)
-
-        child.expect("password:")
-        child.sendline(password)
-
-        child.expect(wexpect.EOF)
-
-        print("Files have been successfully transferred to {}!".format(dest))
-    elif method=='rsync':
-        if verbose:
-            opts = opts + 'v'
-        if save_progress:
-            opts = opts + 'p'   
-
-        #import subprocess
-        #subprocess.run(["rsync", "-"+opts, src, dest])
-        #subprocess.run(["rsync", "-"+opts, src, dest])
-
-        import pty
-        #Test, from https://stackoverflow.com/questions/13041732/ssh-password-through-python-subprocess
-        command = [
-            'rsync',
-            "-"+opts,
-            src,
-            dest
-            #'{0}@{1}'.format(shakename, hostname),
-            #'-o', 'NumberOfPasswordPrompts=1',
-            #'sleep {0}'.format(sleep_time),
-        ]
-
-        # PID = 0 for child, and the PID of the child for the parent    
-        pid, child_fd = pty.fork()
-
-        if not pid: # Child process
-            # Replace child process with our SSH process
-            os.execv(command[0], command)
-
-        while True:
-            output = os.read(child_fd, 1024).strip().decode()  # os.read returns bytes; decode for str comparisons
-            lower = output.lower()
-            # Write the password
-            if lower.endswith('password:'):
-                os.write(child_fd, (password + '\n').encode())  # os.write expects bytes
-                break
-            elif 'are you sure you want to continue connecting' in lower:
-                # Adding key to known_hosts
-                os.write(child_fd, b'yes\n')
-            elif 'company privacy warning' in lower:
-                pass # This is an understood message
-            else:
-                print("SSH Connection Failed",
-                    "Encountered unrecognized message when spawning "
-                    "the SSH tunnel: '{0}'".format(output))
-
-    return dest
-
-def read_tromino_files(datapath, params, sampling_rate=128, start_byte=24576, verbose=False, **kwargs)
+def read_tromino_files(input_data, params, struct_format='H', sampling_rate=128, start_byte=24576, verbose=False, **kwargs)

Function to read data from a Tromino instrument. Specifically, this has been lightly tested on Tromino 3G+ machines.

Parameters

-
datapath : str, pathlib.Path()
+
input_data : str, pathlib.Path()
The input parameter datapath from sprit.input_params()
params : HVSRData or HVSRBatch
The parameters as read in from input_params() and fetch_data()
@@ -6322,100 +769,9 @@

Returns

obspy.Stream
An obspy.Stream object containing the trace data from the Tromino instrument
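
A minimal usage sketch, assuming params has already been created with input_params(); the .trc file path and site name are hypothetical placeholders:

from sprit import sprit_hvsr

# Hypothetical file path and site name, for illustration only
params = sprit_hvsr.input_params(input_data='site1.trc', site='Site 1')
tromino_stream = sprit_hvsr.read_tromino_files('site1.trc', params,
                                               sampling_rate=128, start_byte=24576)
print(tromino_stream)  # obspy.Stream with BHE/BHN/BHZ traces
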
-
def read_tromino_files(datapath, params, sampling_rate=128, start_byte=24576, verbose=False, **kwargs):
-    """Function to read data from tromino. Specifically, this has been lightly tested on Tromino 3G+ machines
-
-    Parameters
-    ----------
-    datapath : str, pathlib.Path()
-        The input parameter _datapath_ from sprit.input_params()
-    params : HVSRData or HVSRBatch
-        The parameters as read in from input_params() and fetch_data()
-    verbose : bool, optional
-        Whether to print results to terminal, by default False
-
-    Returns
-    -------
-    obspy.Stream
-        An obspy.Stream object containing the trace data from the Tromino instrument
-    """
-    dPath = datapath
-
-    strucSizes = {'c':1, 'b':1,'B':1, '?':1,
-                'h':2,'H':2,'e':2,
-                'i':4,'I':4,'l':4,'L':4,'f':4,
-                'q':8,'Q':8,'d':8,
-                'n':8,'N':8,'s':16,'p':16,'P':16,'x':16}
-
-    #H (pretty sure it's Q) I L or Q all seem to work (probably not Q?)
-    structFormat = 'H'
-    structSize = strucSizes[structFormat]
-
-    dataList = []
-    with open(dPath, 'rb') as f:
-        while True:
-            data = f.read(structSize)  # Read structSize bytes (2 for 'H')
-            if not data:  # End of file
-                break
-            value = struct.unpack(structFormat, data)[0]  # Interpret as an unsigned short ('H')
-            dataList.append(value)
-     
-    import numpy as np
-    dataArr = np.array(dataList)
-    import matplotlib.pyplot as plt
-
-    medVal = np.nanmedian(dataArr[50000:100000])
-
-    if 'start_byte' in kwargs.keys():
-        start_byte = kwargs['start_byte']
-
-    startByte = start_byte
-    comp1 = dataArr[startByte::3] - medVal
-    comp2 = dataArr[startByte+1::3] - medVal
-    comp3 = dataArr[startByte+2::3] - medVal
-    headerBytes = dataArr[:startByte]
-
-    #fig, ax = plt.subplots(3, sharex=True, sharey=True)
-    #ax[0].plot(comp1, linewidth=0.1, c='k')
-    #ax[1].plot(comp2, linewidth=0.1, c='k')
-    #ax[2].plot(comp3, linewidth=0.1, c='k')
-
-    if 'sampling_rate' in kwargs.keys():
-        sampling_rate = kwargs['sampling_rate']
-
-    sTime = obspy.UTCDateTime(params['acq_date'].year, params['acq_date'].month, params['acq_date'].day,
-                              params['starttime'].hour, params['starttime'].minute,
-                              params['starttime'].second,params['starttime'].microsecond)
-    eTime = sTime + (((len(comp1))/sampling_rate)/60)*60
-
-    traceHeader1 = {'sampling_rate':sampling_rate,
-            'calib' : 1,
-            'npts':len(comp1),
-            'network':'AM',
-            'location':'00',
-            'station' : 'TRMNO',
-            'channel':'BHE',
-            'starttime':sTime}
-    
-    traceHeader2=traceHeader1.copy()
-    traceHeader3=traceHeader1.copy()
-    traceHeader2['channel'] = 'BHN'
-    traceHeader3['channel'] = 'BHZ'
-
-    trace1 = obspy.Trace(data=comp1, header=traceHeader1)
-    trace2 = obspy.Trace(data=comp2, header=traceHeader2)
-    trace3 = obspy.Trace(data=comp3, header=traceHeader3)
-
-    st = obspy.Stream([trace1, trace2, trace3])    
-    return st
-
-def remove_noise(hvsr_data, remove_method='auto', sat_percent=0.995, noise_percent=0.8, sta=2, lta=30, stalta_thresh=[0.5, 5], warmup_time=0, cooldown_time=0, min_win_size=1, remove_raw_noise=False, verbose=False)
+def remove_noise(hvsr_data, remove_method=None, processing_window=None, sat_percent=0.995, noise_percent=0.8, sta=2, lta=30, stalta_thresh=[8, 16], warmup_time=0, cooldown_time=0, min_win_size=1, remove_raw_noise=False, show_stalta_plot=False, verbose=False)

Function to remove noisy windows from data, using various methods.

@@ -6432,6 +788,10 @@

Parameters

remove_method : str, {'auto', 'manual', 'stalta'/'antitrigger', 'saturation threshold', 'noise threshold', 'warmup'/'cooldown'/'buffer'/'warm_cool'}
The different methods for removing noise from the dataset. A list of strings will also work, in which case it should be a list of the above strings. See descriptions above for how each method works. By default 'auto'. If remove_method='auto', this is the equivalent of remove_method=['noise threshold', 'antitrigger', 'saturation threshold', 'warm_cool']
+
processing_window : list, tuple, or None
+
A list/tuple of two items [s, e] or a list/tuple of two-item lists/tuples [[s0, e0], [s1, e1], … [sn, en]] with start and end time(s) for windows to keep for processing.
+Data outside of these times will be excluded from processing.
+Times should be obspy.UTCDateTime objects to ensure precision, but time strings ("13:05") will also work in most cases (exceptions may be when the data stream starts/ends on different UTC days). See the usage sketch after the Returns section below.
sat_percent : float, default=0.995
Percentage (between 0 and 1), to use as the threshold at which to remove data. This is used in the saturation method. By default 0.995. If a value is passed that is greater than 1, it will be divided by 100 to obtain the percentage.
@@ -6445,266 +805,24 @@

Parameters

stalta_thresh : list, default=[0.5,5]
Two-item list or tuple with the thresholds for the stalta antitrigger. The first value (index [0]) is the lower threshold and the second value (index [1]) is the upper threshold, by default [0.5,5]
warmup_time : int, default=0
-
Time in seconds to allow for warmup of the instrument (or while operator is still near instrument). This will remove any data before this time, by default 0.
-
cooldown_time : int, default=0
-
Time in seconds to allow for cooldown of the instrument (or for when operator is nearing instrument). This will remove any data before this time, by default 0.
-
min_win_size : float, default=1
-
The minimum length of time (in seconds) a window must be over the specified threshold for it to be removed
-
remove_raw_noise : bool, default=False
-
If remove_raw_noise=True, will perform operation on raw data ('input_stream'), rather than potentially already-modified data ('stream').
-
verbose : bool, default=False
-
Whether to print status of remove_noise

Returns

-
-
output : dict
-
Dictionary similar to hvsr_data, but containing modified data with 'noise' removed
-
-
def remove_noise(hvsr_data, remove_method='auto', sat_percent=0.995, noise_percent=0.80, sta=2, lta=30, stalta_thresh=[0.5,5], warmup_time=0, cooldown_time=0, min_win_size=1, remove_raw_noise=False, verbose=False):
-    """Function to remove noisy windows from data, using various methods.
-    
-    Methods include 
-    - Manual window selection (by clicking on a chart with spectrogram and stream data), 
-    - Auto window selection, which does the following in sequence (these can also be done independently):
-        - A sta/lta "antitrigger" method (using stalta values to automatically remove triggered windows where there appears to be too much noise)
-        - A noise threshold method, that cuts off all times where the noise threshold equals more than (by default) 80% of the highest amplitude noise sample for the length specified by lta (in seconds)
-        - A saturation threshold method, that cuts off all times where the noise threshold equals more than (by default) 99.5% of the highest amplitude noise sample.
-
-    Parameters
-    ----------
-    hvsr_data : dict, obspy.Stream, or obspy.Trace
-        Dictionary containing all the data and parameters for the HVSR analysis
-    remove_method : str, {'auto', 'manual', 'stalta'/'antitrigger', 'saturation threshold', 'noise threshold', 'warmup'/'cooldown'/'buffer'/'warm_cool'}
-        The different methods for removing noise from the dataset. A list of strings will also work, in which case it should be a list of the above strings. See descriptions above for how each method works. By default 'auto'.
-        If remove_method='auto', this is the equivalent of remove_method=['noise threshold', 'antitrigger', 'saturation threshold', 'warm_cool']
-    sat_percent : float, default=0.995
-        Percentage (between 0 and 1), to use as the threshold at which to remove data. This is used in the saturation method. By default 0.995. 
-        If a value is passed that is greater than 1, it will be divided by 100 to obtain the percentage.
-    noise_percent : float, default = 0.8
-        Percentage (between 0 and 1), to use as the threshold at which to remove data, if it persists for longer than time (in seconds (specified by min_win_size)). This is used in the noise threshold method. By default 0.8. 
-        If a value is passed that is greater than 1, it will be divided by 100 to obtain the percentage.
-    sta : int, optional
-        Short term average (STA) window (in seconds), by default 2. For use with sta/lta antitrigger method.
-    lta : int, optional
-        Long term average (LTA) window (in seconds), by default 30. For use with sta/lta antitrigger method.
-    stalta_thresh : list, default=[0.5,5]
-        Two-item list or tuple with the thresholds for the stalta antitrigger. The first value (index [0]) is the lower threshold and the second value (index [1]) is the upper threshold, by default [0.5,5]
-    warmup_time : int, default=0
-        Time in seconds to allow for warmup of the instrument (or while operator is still near instrument). This will remove any data before this time, by default 0.
-    cooldown_time : int, default=0
-        Time in seconds to allow for cooldown of the instrument (or for when operator is nearing instrument). This will remove any data before this time, by default 0.
-    min_win_size : float, default=1
-        The minimum length of time (in seconds) a window must be over the specified threshold for it to be removed
-    remove_raw_noise : bool, default=False
-        If remove_raw_noise=True, will perform operation on raw data ('input_stream'), rather than potentially already-modified data ('stream').
-    verbose : bool, default=False
-        Whether to print status of remove_noise
-
-    Returns
-    -------
-    output : dict
-        Dictionary similar to hvsr_data, but containing modified data with 'noise' removed
-    """
-    # Get input parameters
-    orig_args = locals().copy()
-    start_time = datetime.datetime.now()
-    
-    # Update with processing parameters specified previously in input_params, if applicable
-    if 'processing_parameters' in hvsr_data.keys():
-        if 'remove_noise' in hvsr_data['processing_parameters'].keys():
-            for k, v in hvsr_data['processing_parameters']['remove_noise'].items():
-                defaultVDict = dict(zip(inspect.getfullargspec(remove_noise).args[1:], 
-                                        inspect.getfullargspec(remove_noise).defaults))
-                # Manual input to function overrides the imported parameter values
-                if (not isinstance(v, (HVSRData, HVSRBatch))) and (k in orig_args.keys()) and (orig_args[k]==defaultVDict[k]):
-                    orig_args[k] = v
-
-    remove_method = orig_args['remove_method']
-    sat_percent = orig_args['sat_percent']
-    noise_percent = orig_args['noise_percent']
-    sta = orig_args['sta']
-    lta = orig_args['lta']
-    stalta_thresh = orig_args['stalta_thresh']
-    warmup_time = orig_args['warmup_time']
-    cooldown_time = orig_args['cooldown_time']
-    min_win_size = orig_args['min_win_size']
-    remove_raw_noise = orig_args['remove_raw_noise']
-    verbose = orig_args['verbose']
-
-    if (verbose and isinstance(hvsr_data, HVSRBatch)) or (verbose and not hvsr_data['batch']):
-        if isinstance(hvsr_data, HVSRData) and hvsr_data['batch']:
-            pass
-        else:
-            print('\nRemoving noisy data windows (remove_noise())')
-            print('\tUsing the following parameters:')
-            for key, value in orig_args.items():
-                if key=='hvsr_data':
-                    pass
-                else:
-                    print('\t  {}={}'.format(key, value))
-
-    #Setup lists
-    manualList = ['manual', 'man', 'm', 'window', 'windows', 'w']
-    autoList = ['auto', 'automatic', 'all', 'a']
-    antitrigger = ['stalta', 'anti', 'antitrigger', 'trigger', 'at']
-    saturationThresh = ['saturation threshold', 'saturation', 'sat', 's']
-    noiseThresh = ['noise threshold', 'noise', 'threshold', 'n']
-    warmup_cooldown=['warmup', 'cooldown', 'warm', 'cool', 'buffer', 'warmup-cooldown', 'warmup_cooldown', 'wc', 'warm_cool', 'warm-cool']
-
-    #Get Stream from hvsr_data
-    if isinstance(hvsr_data, HVSRBatch):
-        #If running batch, we'll loop through each site
-        hvsr_out = {}
-        for site_name in hvsr_data.keys():
-            args = orig_args.copy() #Make a copy so we don't accidentally overwrite
-            args['hvsr_data'] = hvsr_data[site_name] #Get what would normally be the "hvsr_data" variable for each site
-            if hvsr_data[site_name]['ProcessingStatus']['OverallStatus']:
-                try:
-                   hvsr_out[site_name] = __remove_noise_batch(**args) #Call another function, that lets us run this function again
-                except Exception as e:
-                    hvsr_out[site_name]['ProcessingStatus']['RemoveNoiseStatus']=False
-                    hvsr_out[site_name]['ProcessingStatus']['OverallStatus']=False
-                    if verbose:
-                        print(e)
-            else:
-                hvsr_data[site_name]['ProcessingStatus']['RemoveNoiseStatus']=False
-                hvsr_data[site_name]['ProcessingStatus']['OverallStatus']=False
-                hvsr_out = hvsr_data
-
-        output = HVSRBatch(hvsr_out)
-        return output
-    elif isinstance(hvsr_data, (HVSRData, dict, obspy.Stream, obspy.Trace)):
-        if isinstance(hvsr_data, (HVSRData, dict)):
-            if remove_raw_noise:
-                inStream = hvsr_data['input_stream'].copy()
-            else:
-                inStream = hvsr_data['stream'].copy()
-            output = hvsr_data#.copy()
-        else:
-            inStream = hvsr_data.copy()
-            output = inStream.copy()
-
-        outStream = inStream
-        
-        if isinstance(remove_method, str):
-            if ',' in remove_method:
-                remove_method = remove_method.split(',')
-            else:
-                remove_method = [remove_method]
-        elif isinstance(remove_method, (list, tuple)):
-            pass
-        elif not remove_method:
-            remove_method=[None]
-        else:
-            warnings.warn(f"Input value remove_method={remove_method} must be either string, list of strings, None, or False. No noise removal will be carried out. Please choose one of the following: 'manual', 'auto', 'antitrigger', 'noise threshold', 'warmup_cooldown'.")
-            return output
-            
-        #Reorder list so manual is always first
-        if len(set(remove_method).intersection(manualList)) > 0:
-            manInd = list(set(remove_method).intersection(manualList))[0]
-            remove_method.remove(manInd)
-            remove_method.insert(0, manInd)
-            
-        #Go through each type of removal and remove
-        for rem_kind in remove_method:
-            if not rem_kind:
-                break
-            elif rem_kind.lower() in manualList:
-                if isinstance(output, (HVSRData, dict)):
-                    if 'xwindows_out' in output.keys():
-                        pass
-                    else:
-                        output = _select_windows(output)
-                    window_list = output['xwindows_out']
-                if isinstance(outStream, obspy.core.stream.Stream):
-                    if window_list is not None:
-                        output['stream'] = __remove_windows(inStream, window_list, warmup_time)
-                    else:
-                        output = _select_windows(output)
-                elif isinstance(output, (HVSRData, dict)):
-                    pass
-                else:
-                    RuntimeError("Only obspy.core.stream.Stream data type is currently supported for manual noise removal method.")     
-            elif rem_kind.lower() in autoList:
-                outStream = __remove_noise_thresh(outStream, noise_percent=noise_percent, lta=lta, min_win_size=min_win_size)
-                outStream = __remove_anti_stalta(outStream, sta=sta, lta=lta, thresh=stalta_thresh)
-                outStream = __remove_noise_saturate(outStream, sat_percent=sat_percent, min_win_size=min_win_size)
-                outStream = __remove_warmup_cooldown(stream=outStream, warmup_time=warmup_time, cooldown_time=cooldown_time)
-            elif rem_kind.lower() in antitrigger:
-                outStream = __remove_anti_stalta(outStream, sta=sta, lta=lta, thresh=stalta_thresh)
-            elif rem_kind.lower() in saturationThresh:
-                outStream = __remove_noise_saturate(outStream, sat_percent=sat_percent, min_win_size=min_win_size)
-            elif rem_kind.lower() in noiseThresh:
-                outStream = __remove_noise_thresh(outStream, noise_percent=noise_percent, lta=lta, min_win_size=min_win_size)
-            elif rem_kind.lower() in warmup_cooldown:
-                outStream = __remove_warmup_cooldown(stream=outStream, warmup_time=warmup_time, cooldown_time=cooldown_time)
-            else:
-                if len(remove_method)==1:
-                    warnings.warn(f"Input value remove_method={remove_method} is not recognized. No noise removal will be carried out. Please choose one of the following: 'manual', 'auto', 'antitrigger', 'noise threshold', 'warmup_cooldown'.")
-                    break
-                warnings.warn(f"Input value remove_method={remove_method} is not recognized. Continuing with other noise removal methods.")
-
-        #Add output
-        if isinstance(output, (HVSRData, dict)):
-            if isinstance(outStream, (obspy.Stream, obspy.Trace)):
-                output['stream'] = outStream
-            else:
-                output['stream'] = outStream['stream']
-            output['input_stream'] = hvsr_data['input_stream']
-            
-            if 'processing_parameters' not in output.keys():
-                output['processing_parameters'] = {}
-            output['processing_parameters']['remove_noise'] = {}
-            for key, value in orig_args.items():
-                output['processing_parameters']['remove_noise'][key] = value
-            
-            output['ProcessingStatus']['RemoveNoiseStatus'] = True
-            output = _check_processing_status(output, start_time=start_time, func_name=inspect.stack()[0][3], verbose=verbose)
-
-            if 'hvsr_df' in output.keys() or ('params' in output.keys() and 'hvsr_df' in output['params'].keys())or ('input_params' in output.keys() and 'hvsr_df' in output['input_params'].keys()):
-                hvsrDF = output['hvsr_df']
-                
-                outStream = output['stream'].split()
-                for i, trace in enumerate(outStream):
-                    if i ==0:
-                        trEndTime = trace.stats.endtime
-                        comp_end = trace.stats.component
-                        continue
-                    trStartTime = trace.stats.starttime
-                    comp_start = trace.stats.component
-                    
-                    if trEndTime < trStartTime and comp_end==comp_start:
-                        gap = [trEndTime,trStartTime]
-
-                        output['hvsr_df']['Use'] = (hvsrDF['TimesProcessed_Obspy'].gt(gap[0]) & hvsrDF['TimesProcessed_Obspy'].gt(gap[1]) )| \
-                                        (hvsrDF['TimesProcessed_ObspyEnd'].lt(gap[0]) & hvsrDF['TimesProcessed_ObspyEnd'].lt(gap[1]))# | \
-                        output['hvsr_df']['Use'] = output['hvsr_df']['Use'].astype(bool)
-                    
-                    trEndTime = trace.stats.endtime
-                
-                outStream.merge()
-                output['stream'] = outStream
-                    
-        elif isinstance(hvsr_data, obspy.Stream) or isinstance(hvsr_data, obspy.Trace):
-            output = outStream
-        else:
-            warnings.warn(f"Output of type {type(output)} for this function will likely result in errors in other processing steps. Returning hvsr_data data.")
-            return hvsr_data
-        output = sprit_utils.make_it_classy(output)
-        if 'xwindows_out' not in output.keys():
-            output['xwindows_out'] = []
-    else:
-        RuntimeError(f"Input of type type(hvsr_data)={type(hvsr_data)} cannot be used.")
-    
-    return output
-
+
Time in seconds to allow for warmup of the instrument (or while operator is still near instrument). This will remove any data before this time, by default 0.
+
cooldown_time : int, default=0
+
Time in seconds to allow for cooldown of the instrument (or for when operator is nearing instrument). This will remove any data before this time, by default 0.
+
min_win_size : float, default=1
+
The minimum length of time (in seconds) a window must be over the specified threshold for it to be removed
+
remove_raw_noise : bool, default=False
+
If remove_raw_noise=True, will perform operation on raw data ('input_stream'), rather than potentially already-modified data ('stream').
+
verbose : bool, default=False
+
Whether to print status of remove_noise

Returns

+
+
output : dict
+
Dictionary similar to hvsr_data, but containing modified data with 'noise' removed
+
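
A usage sketch tying the parameters above together; the file path and processing window times are hypothetical placeholders, and remove_method is spelled out as the list that remove_method='auto' expands to:

from sprit import sprit_hvsr
from obspy import UTCDateTime

# Hypothetical file and times, for illustration only
hvsr_data = sprit_hvsr.input_params(input_data='site1.mseed')
hvsr_data = sprit_hvsr.fetch_data(params=hvsr_data, source='file')
hvsr_data = sprit_hvsr.remove_noise(
    hvsr_data,
    remove_method=['noise threshold', 'antitrigger', 'saturation threshold', 'warm_cool'],
    processing_window=[UTCDateTime('2023-06-01T13:00:00'), UTCDateTime('2023-06-01T13:30:00')],
    sta=2, lta=30, stalta_thresh=[8, 16],
    warmup_time=60, cooldown_time=60,
)
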
-def remove_outlier_curves(hvsr_data, rmse_thresh=98, use_percentile=True, use_hv_curve=False, show_outlier_plot=False, verbose=False)
+def remove_outlier_curves(hvsr_data, rmse_thresh=98, use_percentile=True, use_hv_curve=False, plot_engine='matplotlib', show_plot=False, verbose=False)

Function used to remove outlier curves using Root Mean Square Error to calculate the error of each windowed Probabilistic Power Spectral Density (PPSD) curve against the median PPSD value at each frequency step for all times.
@@ -6734,241 +852,285 @@

Returns

hvsr_data : dict
Input dictionary with values modified based on work of function.
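
The underlying test is straightforward: compute the median curve across all time windows, take the RMSE of each window's curve against it, and flag windows above the (by default percentile-based) threshold. A simplified, self-contained sketch of that logic on synthetic data:

import numpy as np

# Synthetic stand-in for windowed PPSD curves: 50 time windows x 200 frequency bins
rng = np.random.default_rng(0)
curves = rng.normal(size=(50, 200)).cumsum(axis=1)

med_curve = np.nanmedian(curves, axis=0)                    # median curve across windows
rmse = np.sqrt(np.mean((curves - med_curve) ** 2, axis=1))  # RMSE of each window vs. median

rmse_threshold = np.percentile(rmse, 98)  # equivalent of use_percentile=True, rmse_thresh=98
keep = rmse <= rmse_threshold             # mask of time windows retained for analysis
print(f"Removed {np.count_nonzero(~keep)} of {len(curves)} windows")
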
-
def remove_outlier_curves(hvsr_data, rmse_thresh=98, use_percentile=True, use_hv_curve=False, show_outlier_plot=False, verbose=False):
-    """Function used to remove outliers curves using Root Mean Square Error to calculate the error of each windowed
-    Probabilistic Power Spectral Density (PPSD) curve against the median PPSD value at each frequency step for all times.
-    It calculates the RMSE for the PPSD curves of each component individually. All curves are removed from analysis.
-    
-    Some aberrant curves often occur due to the remove_noise() function, so this should be run some time after remove_noise(). 
-    In general, the recommended workflow is to run this immediately following the generate_ppsds() function.
-
-    Parameters
-    ----------
-    hvsr_data : dict
-        Input dictionary containing all the values and parameters of interest
-    rmse_thresh : float or int, default=98
-        The Root Mean Square Error value to use as a threshold for determining whether a curve is an outlier. 
-        This averages over each entire individual curve so that curves with very aberrant data (as often occurs when using the remove_noise() method) can be identified.
-        Otherwise, specify a float or integer to use as the cutoff RMSE value (all curves with RMSE above will be removed)
-    use_percentile :  float, default=True
-        Whether rmse_thresh should be interepreted as a raw RMSE value or as a percentile of the RMSE values.
-    use_hv_curve : bool, default=False
-        Whether to use the calculated HV Curve or the individual components. This can only be True after process_hvsr() has been run.
-    show_outlier_plot : bool, default=False
-        Whether to show a plot of the removed data
-    verbose : bool, default=False
-        Whether to print output of function to terminal
-
-    Returns
-    -------
-    hvsr_data : dict
-        Input dictionary with values modified based on work of function.
-    """
-    # Setup function
-    # Get input parameters
-    orig_args = locals().copy()
-    start_time = datetime.datetime.now()
-    
-    # Update with processing parameters specified previously in input_params, if applicable
-    if 'processing_parameters' in hvsr_data.keys():
-        if 'remove_outlier_curves' in hvsr_data['processing_parameters'].keys():
-            for k, v in hvsr_data['processing_parameters']['remove_outlier_curves'].items():
-                defaultVDict = dict(zip(inspect.getfullargspec(remove_outlier_curves).args[1:], 
-                                        inspect.getfullargspec(remove_outlier_curves).defaults))
-                # Manual input to function overrides the imported parameter values
-                if (not isinstance(v, (HVSRData, HVSRBatch))) and (k in orig_args.keys()) and (orig_args[k]==defaultVDict[k]):
-                    orig_args[k] = v
-
-    # Reset parameters in case of manual override of imported parameters
-    use_percentile = orig_args['use_percentile']
-    rmse_thresh = orig_args['rmse_thresh']
-    use_hv_curve = orig_args['use_hv_curve']
-    show_outlier_plot = orig_args['show_outlier_plot']
-    verbose = orig_args['verbose']
-
-    #Print if verbose, which changes depending on if batch data or not
-    if (verbose and isinstance(hvsr_data, HVSRBatch)) or (verbose and not hvsr_data['batch']):
-        if isinstance(hvsr_data, HVSRData) and hvsr_data['batch']:
-            pass
-        else:
-            print('\nRemoving outlier curves from further analysis (remove_outlier_curves())')
-            print('\tUsing the following parameters:')
-            for key, value in orig_args.items():
-                if key=='hvsr_data':
-                    pass
-                else:
-                    print('\t  {}={}'.format(key, value))
-            print()
-    
-    #First, divide up for batch or not
-    #Site is in the keys anytime it's not batch
-    if isinstance(hvsr_data, HVSRBatch):
-        #If running batch, we'll loop through each site
-        hvsr_out = {}
-        for site_name in hvsr_data.keys():
-            args = orig_args.copy() #Make a copy so we don't accidentally overwrite
-            args['hvsr_data'] = hvsr_data[site_name] #Get what would normally be the "hvsr_data" variable for each site
-            if hvsr_data[site_name]['ProcessingStatus']['OverallStatus']:
-                try:
-                    hvsr_out[site_name] = __remove_outlier_curves(**args) #Call another function, that lets us run this function again
-                except:
-                    hvsr_out = hvsr_data
-                    hvsr_out[site_name]['ProcessingStatus']['RemoveOutlierCurves'] = False
-                    hvsr_out[site_name]['ProcessingStatus']['OverallStatus'] = False                    
-            else:
-                hvsr_out = hvsr_data
-                hvsr_out[site_name]['ProcessingStatus']['RemoveOutlierCurves'] = False
-                hvsr_out[site_name]['ProcessingStatus']['OverallStatus'] = False
-        hvsr_out = HVSRBatch(hvsr_out)
-    else:  
-        #Create plot if designated        
-        if not use_hv_curve:
-            compNames = ['Z', 'E', 'N']
-            colNames = compNames
-        else:
-            compNames=['HV Curve']
-            colNames = ['HV_Curves']
-        if show_outlier_plot:
-            if use_hv_curve:
-                spMosaic = ['HV Curve']
-            else:
-                spMosaic = [['Z'],
-                            ['E'],
-                            ['N']]
-            fig, ax=plt.subplot_mosaic(spMosaic, sharex=True)
-
-        #Loop through each component, and determine which curves are outliers
-        bad_rmse=[]
-        for i, column in enumerate(colNames):
-            if column in compNames:
-                column = 'psd_values_'+column
-            # Retrieve data from dataframe (use all windows, just in case)
-            curr_data = np.stack(hvsr_data['hvsr_df'][column])
-            
-            # Calculate a median curve, and reshape so same size as original
-            medCurve = np.nanmedian(curr_data, axis=0)
-            medCurveArr = np.tile(medCurve, (curr_data.shape[0], 1))
-            
-            # Calculate RMSE
-            rmse = np.sqrt(((np.subtract(curr_data, medCurveArr)**2).sum(axis=1))/curr_data.shape[1])
-            hvsr_data['hvsr_df']['RMSE_'+column] = rmse
-            if use_percentile is True:
-                rmse_threshold = np.percentile(rmse, rmse_thresh)
-                if verbose:
-                    print(f'\tRMSE at {rmse_thresh}th percentile for {column} calculated at: {rmse_threshold:.2f}')
-            else:
-                rmse_threshold = rmse_thresh
-            
-            # Retrieve index of those RMSE values that lie outside the threshold
-            for j, curve in enumerate(curr_data):
-                if rmse[j] > rmse_threshold:
-                    bad_rmse.append(j)
-
-            # Show plot of removed/retained data
-            if show_outlier_plot:
-                # Initialize to only get unique labels
-                rem_label_got = False
-                keep_label_got = False
-                
-                # Iterate through each curve to determine if its RMSE is outside the threshold, for plot
-                for j, curve in enumerate(curr_data):
-                    label=None
-                    if rmse[j] > rmse_threshold:
-                        linestyle = 'dashed'
-                        linecolor='darkred'
-                        alpha = 1
-                        linewidth = 1
-                        if not rem_label_got:
-                            label='Removed Curve'
-                            rem_label_got=True
-                    else:
-                        linestyle='solid'
-                        linecolor = 'rosybrown'
-                        alpha = 0.25
-                        linewidth=0.5
-                        if not keep_label_got:
-                            keep_label_got=True
-                            label='Retained Curve'
-
-                    # Plot each individual curve
-                    ax[compNames[i]].plot(1/hvsr_data.ppsds[compNames[i]]['period_bin_centers'], curve, linewidth=linewidth, c=linecolor, linestyle=linestyle, alpha=alpha, label=label)
-                
-                # Plot the median curve
-                ax[compNames[i]].plot(1/hvsr_data.ppsds[compNames[i]]['period_bin_centers'],medCurve, linewidth=1, color='k', label='Median Curve')
-                
-                # Format axis
-                ax[compNames[i]].set_ylabel(f"{compNames[i]}")
-                ax[compNames[i]].legend(fontsize=10, labelspacing=0.1)
-                ax[compNames[i]].semilogx()             
-        if show_outlier_plot:
-            plt.show()
-                    
-        # Get unique values of bad_rmse indices and set the "Use" column of the hvsr_df to False for that window
-        bad_rmse = np.unique(bad_rmse)
-        if len(bad_rmse) > 0:
-            
-            hvsr_data['hvsr_df']['Use'] = hvsr_data['hvsr_df']['Use'] * (rmse_threshold > hvsr_data['hvsr_df']['RMSE_'+column])
-            #hvsr_data['hvsr_df'].loc[bad_index, "Use"] = False   
-        
-        if verbose:
-            if len(bad_rmse)>0:
-                print(f"\tThe windows starting at the following times have been removed from further analysis ({len(bad_rmse)}/{hvsr_data['hvsr_df'].shape[0]}):")
-                for b in hvsr_data['hvsr_df'].index[pd.Series(bad_rmse)]:
-                    print(f"\t\t{b}")
-            else:
-                print('\tNo outlier curves have been removed')
-                    
-        hvsr_out = hvsr_data
-
-        if 'processing_parameters' not in hvsr_out.keys():
-            hvsr_out['processing_parameters'] = {}
-        hvsr_out['processing_parameters']['remove_outlier_curves'] = {}
-        for key, value in orig_args.items():
-            hvsr_out['processing_parameters']['remove_outlier_curves'][key] = value
-
-        hvsr_data['ProcessingStatus']['RemoveOutlierCurvesStatus'] = True
-    
-    hvsr_out = _check_processing_status(hvsr_out, start_time=start_time, func_name=inspect.stack()[0][3], verbose=verbose)
-    
-    return hvsr_out
-
-def run(datapath, source='file', verbose=False, **kwargs)
+def run(input_data, source='file', azimuth_calculation=False, noise_removal=False, outlier_curves_removal=False, verbose=False, **kwargs)

sprit.run() is the main function that allows you to do all your HVSR processing in one simple step (sprit.run() is how you would call it in your code, but it may also be called using sprit.sprit_hvsr.run())

-

The datapath parameter of sprit.run() is the only required parameter. This can be either a single file, a list of files (one for each component, for example), a directory (in which case, all obspy-readable files will be added to an HVSRBatch instance), a Rasp. Shake raw data directory, or sample data.

-
The sprit.run() function calls the following functions. This is the recommended order/set of functions to run to process HVSR using SpRIT. See the API documentation for these functions for more information:
-- input_params(): The datapath parameter of input_params() is the only required variable, though others may also need to be called for your data to process correctly.
-- fetch_data(): the source parameter of fetch_data() is the only explicit variable in the sprit.run() function aside from datapath and verbose. Everything else gets delivered to the correct function via the kwargs dictionary
+

The input_data parameter of sprit.run() is the only required parameter. This can be either a single file, a list of files (one for each component, for example), a directory (in which case, all obspy-readable files will be added to an HVSRBatch instance), a Rasp. Shake raw data directory, or sample data.

+

Notes

+

The sprit.run() function calls the following functions. This is the recommended order/set of functions to run to process HVSR using SpRIT. See the API documentation for these functions for more information:
+- input_params(): The input_data parameter of input_params() is the only required variable, though others may also need to be called for your data to process correctly.
+- fetch_data(): the source parameter of fetch_data() is the only explicit variable in the sprit.run() function aside from input_data and verbose. Everything else gets delivered to the correct function via the kwargs dictionary
- remove_noise(): by default, the kind of noise removal is remove_method='auto'. See the remove_noise() documentation for more information. If remove_method is set to anything other than one of the explicit options in remove_noise, noise removal will not be carried out.
- generate_ppsds(): generates ppsds for each component, which will be combined/used later. Any parameter of obspy.signal.spectral_estimation.PPSD() may also be read into this function.
- remove_outlier_curves(): removes any outlier ppsd curves so that the data quality for when curves are combined will be enhanced. See the remove_outlier_curves() documentation for more information.
- process_hvsr(): this is the main function processing the hvsr curve and statistics. See process_hvsr() documentation for more details. The hvsr_band parameter sets the frequency spectrum over which these calculations occur.
- check_peaks(): this is the main function that will find and 'score' peaks to get a best peak. The parameter peak_freq_range can be set to limit the frequencies within which peaks are checked and scored.
- get_report(): this is the main function that will print, plot, and/or save the results of the data. See the get_report() API documentation for more information.
-- export_data(): this function exports the final data output as a pickle file (by default, this pickle object has a .hvsr extension). This can be used to read data back into SpRIT without having to reprocess data.

+- export_data(): this function exports the final data output as a pickle file (by default, this pickle object has a .hvsr extension). This can be used to read data back into SpRIT without having to reprocess data.
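
As a sketch of the whole pipeline in one call ('sample' uses SpRIT's bundled sample data as described under input_data below; the mseed path is a hypothetical placeholder):

import sprit

# Quickest start: process the bundled sample data
hvsr_results = sprit.run(input_data='sample', verbose=True)

# Hypothetical real-data call; extra kwargs are routed to the functions listed above
hvsr_results = sprit.run(
    input_data='site1.mseed',   # hypothetical file path
    source='file',
    noise_removal=True,         # run remove_noise() as part of the pipeline
    hvsr_band=[0.4, 40],        # routed to input_params()
    peak_freq_range=[0.4, 40],  # routed to check_peaks()
)
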

Parameters

-
datapath : str or filepath object that can be read by obspy
+
input_data : str or filepath object that can be read by obspy
Filepath to data to be processed. This may be a file or directory, depending on what kind of data is being processed (this can be specified with the source parameter).
-For sample data, the following can be specified as the datapath parameter:
-- Any integer 1-6 (inclusive), or the string (e.g., datapath="1" or datapath=1 will work)
-- The word "sample" before any integer (e.g., datapath="sample1")
+For sample data, the following can be specified as the input_data parameter:
+- Any integer 1-6 (inclusive), or the string (e.g., input_data="1" or input_data=1 will work)
+- The word "sample" before any integer (e.g., input_data="sample1")
- The word "sample" will default to "sample1" if source='file'.
-- If source='batch', datapath should be datapath='sample' or datapath='batch'. In this case, it will read and process all the sample files using the HVSRBatch class. Set verbose=True to see all the information in the sample batch csv file.
+- If source='batch', input_data should be input_data='sample' or input_data='batch'. In this case, it will read and process all the sample files using the HVSRBatch class. Set verbose=True to see all the information in the sample batch csv file.
source : str, optional
How input_data should be interpreted (e.g., as a single file with source='file', or as a set of files with source='batch'; see the input_data description above), by default 'file'
+
azimuth_calculation : bool, optional
+
Whether to perform azimuthal analysis, by default False.
+
noise_removal : bool, default=False
+
Whether to remove noise (before processing PPSDs)
+
outlier_curves_removal : bool, default=False
+
Whether to remove outlier curves from HVSR time windows
+
show_plot : bool, default=True
+
Whether to show plots. This does not affect whether the plots are created (and then inserted as an attribute of HVSRData), only whether they are shown.
verbose : bool, optional
Whether to print status and results to the terminal, by default False
**kwargs
Keyword arguments for the functions listed above. The keyword arguments are unique, so they will get parsed out and passed into the appropriate function.
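
The routing of these unique keyword arguments can be illustrated generically: inspect each function's accepted parameter names and parse out the matching keywords for it. The helper below is a simplified stand-in, not SpRIT's actual internal code:

import inspect

def route_kwargs(functions, **kwargs):
    # Illustrative helper: split a flat kwargs dict into one dict per function,
    # keyed on each function's accepted parameter names
    routed = {}
    for func in functions:
        accepted = inspect.signature(func).parameters
        routed[func.__name__] = {k: v for k, v in kwargs.items() if k in accepted}
    return routed

# Example with plain stand-in functions
def input_params(input_data, hvsr_band=None): ...
def check_peaks(hvsr_data=None, peak_freq_range=None): ...

print(route_kwargs([input_params, check_peaks],
                   hvsr_band=[0.4, 40], peak_freq_range=[0.5, 30]))
# {'input_params': {'hvsr_band': [0.4, 40]}, 'check_peaks': {'peak_freq_range': [0.5, 30]}}
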
+
input_params : function name (not an actual parameter)
+
Function for designating input parameters for reading in and processing data
+See API documentation: input_params()
+
input_data : any, default = '<no default>'
+
See API documentation at link above or at help(input_params()) for specifics.
+
site : any, default = 'HVSR Site'
+
See API documentation at link above or at help(input_params()) for specifics.
+
id_prefix : any, default = None
+
See API documentation at link above or at help(input_params()) for specifics.
+
network : any, default = 'AM'
+
See API documentation at link above or at help(input_params()) for specifics.
+
station : any, default = 'RAC84'
+
See API documentation at link above or at help(input_params()) for specifics.
+
loc : any, default = '00'
+
See API documentation at link above or at help(input_params()) for specifics.
+
channels : any, default = ['EHZ', 'EHN', 'EHE']
+
See API documentation at link above or at help(input_params()) for specifics.
+
acq_date : any, default = '2024-10-30'
+
See API documentation at link above or at help(input_params()) for specifics.
+
starttime : any, default = 2024-10-30T00:00:00.000000Z
+
See API documentation at link above or at help(input_params()) for specifics.
+
endtime : any, default = 2024-10-30T23:59:59.999999Z
+
See API documentation at link above or at help(input_params()) for specifics.
+
tzone : any, default = 'UTC'
+
See API documentation at link above or at help(input_params()) for specifics.
+
xcoord : any, default = -88.2290526
+
See API documentation at link above or at help(input_params()) for specifics.
+
ycoord : any, default = 40.1012122
+
See API documentation at link above or at help(input_params()) for specifics.
+
elevation : any, default = 755
+
See API documentation at link above or at help(input_params()) for specifics.
+
input_crs : any, default = None
+
See API documentation at link above or at help(input_params()) for specifics.
+
output_crs : any, default = None
+
See API documentation at link above or at help(input_params()) for specifics.
+
elev_unit : any, default = 'meters'
+
See API documentation at link above or at help(input_params()) for specifics.
+
depth : any, default = 0
+
See API documentation at link above or at help(input_params()) for specifics.
+
instrument : any, default = 'Raspberry Shake'
+
See API documentation at link above or at help(input_params()) for specifics.
+
metapath : any, default = None
+
See API documentation at link above or at help(input_params()) for specifics.
+
hvsr_band : any, default = [0.4, 40]
+
See API documentation at link above or at help(input_params()) for specifics.
+
peak_freq_range : any, default = [0.4, 40]
+
See API documentation at link above or at help(input_params()) for specifics.
+
processing_parameters : any, default = {}
+
See API documentation at link above or at help(input_params()) for specifics.
+
verbose : any, default = False
+
See API documentation at link above or at help(input_params()) for specifics.
+
fetch_data : function name (not an actual parameter)
+
Fetch ambient seismic data from a source to read into obspy stream
+See API documentation: fetch_data()
+
params : any, default = '<output of previous function>'
+
See API documentation at link above or at help(fetch_data()) for specifics.
+
source : any, default = 'file'
+
See API documentation at link above or at help(fetch_data()) for specifics.
+
data_export_path : any, default = None
+
See API documentation at link above or at help(fetch_data()) for specifics.
+
data_export_format : any, default = 'mseed'
+
See API documentation at link above or at help(fetch_data()) for specifics.
+
detrend : any, default = 'spline'
+
See API documentation at link above or at help(fetch_data()) for specifics.
+
detrend_order : any, default = 2
+
See API documentation at link above or at help(fetch_data()) for specifics.
+
update_metadata : any, default = True
+
See API documentation at link above or at help(fetch_data()) for specifics.
+
plot_input_stream : any, default = False
+
See API documentation at link above or at help(fetch_data()) for specifics.
+
plot_engine : any, default = 'matplotlib'
+
See API documentation at link above or at help(fetch_data()) for specifics.
+
show_plot : any, default = True
+
See API documentation at link above or at help(fetch_data()) for specifics.
+
verbose : any, default = False
+
See API documentation at link above or at help(fetch_data()) for specifics.
+
kwargs : any, default = {}
+
See API documentation at link above or at help(fetch_data()) for specifics.
+
calculate_azimuth : function name (not an actual parameter)
+
Function to calculate azimuthal horizontal component at specified angle(s). Adds each new horizontal
+See API documentation: calculate_azimuth()
+
hvsr_data : any, default = '<output of previous function>'
+
See API documentation at link above or at help(calculate_azimuth()) for specifics.
+
azimuth_angle : any, default = 30
+
See API documentation at link above or at help(calculate_azimuth()) for specifics.
+
azimuth_type : any, default = 'multiple'
+
See API documentation at link above or at help(calculate_azimuth()) for specifics.
+
azimuth_unit : any, default = 'degrees'
+
See API documentation at link above or at help(calculate_azimuth()) for specifics.
+
show_az_plot : any, default = False
+
See API documentation at link above or at help(calculate_azimuth()) for specifics.
+
verbose : any, default = False
+
See API documentation at link above or at help(calculate_azimuth()) for specifics.
+
plot_azimuth_kwargs : any, default = {}
+
See API documentation at link above or at help(calculate_azimuth()) for specifics.
+
remove_noise : function name (not an actual parameter)
+
Function to remove noisy windows from data, using various methods.
+See API documentation: remove_noise()
+
hvsr_data : any, default = '<output of previous function>'
+
See API documentation at link above or at help(remove_noise()) for specifics.
+
remove_method : any, default = None
+
See API documentation at link above or at help(remove_noise()) for specifics.
+
processing_window : any, default = None
+
See API documentation at link above or at help(remove_noise()) for specifics.
+
sat_percent : any, default = 0.995
+
See API documentation at link above or at help(remove_noise()) for specifics.
+
noise_percent : any, default = 0.8
+
See API documentation at link above or at help(remove_noise()) for specifics.
+
sta : any, default = 2
+
See API documentation at link above or at help(remove_noise()) for specifics.
+
lta : any, default = 30
+
See API documentation at link above or at help(remove_noise()) for specifics.
+
stalta_thresh : any, default = [8, 16]
+
See API documentation at link above or at help(remove_noise()) for specifics.
+
warmup_time : any, default = 0
+
See API documentation at link above or at help(remove_noise()) for specifics.
+
cooldown_time : any, default = 0
+
See API documentation at link above or at help(remove_noise()) for specifics.
+
min_win_size : any, default = 1
+
See API documentation at link above or at help(remove_noise()) for specifics.
+
remove_raw_noise : any, default = False
+
See API documentation at link above or at help(remove_noise()) for specifics.
+
show_stalta_plot : any, default = False
+
See API documentation at link above or at help(remove_noise()) for specifics.
+
verbose : any, default = False
+
See API documentation at link above or at help(remove_noise()) for specifics.
+
generate_ppsds : function name (not an actual parameter)
+
Generates PPSDs for each channel
+See API documentation: generate_ppsds()
+
hvsr_data : any, default = '<output of previous function>'
+
See API documentation at link above or at help(generate_ppsds()) for specifics.
+
azimuthal_ppsds : any, default = False
+
See API documentation at link above or at help(generate_ppsds()) for specifics.
+
verbose : any, default = False
+
See API documentation at link above or at help(generate_ppsds()) for specifics.
+
ppsd_kwargs : any, default = {}
+
See API documentation at link above or at help(generate_ppsds()) for specifics.
+
process_hvsr : function name (not an actual parameter)
+
Process the input data and get HVSR data
+See API documentation: process_hvsr()
+
hvsr_data : any, default = '<output of previous function>'
+
See API documentation at link above or at help(process_hvsr()) for specifics.
+
horizontal_method : any, default = None
+
See API documentation at link above or at help(process_hvsr()) for specifics.
+
smooth : any, default = True
+
See API documentation at link above or at help(process_hvsr()) for specifics.
+
freq_smooth : any, default = 'konno ohmachi'
+
See API documentation at link above or at help(process_hvsr()) for specifics.
+
f_smooth_width : any, default = 40
+
See API documentation at link above or at help(process_hvsr()) for specifics.
+
resample : any, default = True
+
See API documentation at link above or at help(process_hvsr()) for specifics.
+
outlier_curve_rmse_percentile : any, default = False
+
See API documentation at link above or at help(process_hvsr()) for specifics.
+
azimuth : any, default = None
+
See API documentation at link above or at help(process_hvsr()) for specifics.
+
verbose : any, default = False
+
See API documentation at link above or at help(process_hvsr()) for specifics.
+
remove_outlier_curves : function name (not an actual parameter)
+
Function used to remove outlier curves using Root Mean Square Error to calculate the error of each
+See API documentation: remove_outlier_curves()
+
hvsr_data : any, default = '<output of previous function>'
+
See API documentation at link above or at help(remove_outlier_curves()) for specifics.
+
rmse_thresh : any, default = 98
+
See API documentation at link above or at help(remove_outlier_curves()) for specifics.
+
use_percentile : any, default = True
+
See API documentation at link above or at help(remove_outlier_curves()) for specifics.
+
use_hv_curve : any, default = False
+
See API documentation at link above or at help(remove_outlier_curves()) for specifics.
+
plot_engine : any, default = 'matplotlib'
+
See API documentation at link above or at help(remove_outlier_curves()) for specifics.
+
show_plot : any, default = False
+
See API documentation at link above or at help(remove_outlier_curves()) for specifics.
+
verbose : any, default = False
+
See API documentation at link above or at help(remove_outlier_curves()) for specifics.
+
check_peaks : function name (not an actual parameter)
+
Function to run tests on HVSR peaks to find best one and see if it passes quality checks
+See API documentation: check_peaks()
+
hvsr_data : any, default = '<output of previous function>'
+
See API documentation at link above or at help(check_peaks()) for specifics.
+
hvsr_band : any, default = [0.4, 40]
+
See API documentation at link above or at help(check_peaks()) for specifics.
+
peak_selection : any, default = 'max'
+
See API documentation at link above or at help(check_peaks()) for specifics.
+
peak_freq_range : any, default = [0.4, 40]
+
See API documentation at link above or at help(check_peaks()) for specifics.
+
azimuth : any, default = 'HV'
+
See API documentation at link above or at help(check_peaks()) for specifics.
+
verbose : any, default = False
+
See API documentation at link above or at help(check_peaks()) for specifics.
+
get_report : function name (not an actual parameter)
+
Generate and/or print and/or export a report of the HVSR analysis in a variety of formats.
+See API documentation: get_report()
+
hvsr_results : any, default = '<output of previous function>'
+
See API documentation at link above or at help(get_report()) for specifics.
+
report_formats : any, default = ['print', 'table', 'plot', 'html', 'pdf']
+
See API documentation at link above or at help(get_report()) for specifics.
+
azimuth : any, default = 'HV'
+
See API documentation at link above or at help(get_report()) for specifics.
+
plot_type : any, default = 'HVSR p ann C+ p ann Spec p ann'
+
See API documentation at link above or at help(get_report()) for specifics.
+
plot_engine : any, default = 'matplotlib'
+
See API documentation at link above or at help(get_report()) for specifics.
+
show_print_report : any, default = True
+
See API documentation at link above or at help(get_report()) for specifics.
+
show_table_report : any, default = False
+
See API documentation at link above or at help(get_report()) for specifics.
+
show_plot_report : any, default = True
+
See API documentation at link above or at help(get_report()) for specifics.
+
show_html_report : any, default = False
+
See API documentation at link above or at help(get_report()) for specifics.
+
show_pdf_report : any, default = True
+
See API documentation at link above or at help(get_report()) for specifics.
+
suppress_report_outputs : any, default = False
+
See API documentation at link above or at help(get_report()) for specifics.
+
show_report_outputs : any, default = False
+
See API documentation at link above or at help(get_report()) for specifics.
+
csv_handling : any, default = 'append'
+
See API documentation at link above or at help(get_report()) for specifics.
+
report_export_format : any, default = None
+
See API documentation at link above or at help(get_report()) for specifics.
+
report_export_path : any, default = None
+
See API documentation at link above or at help(get_report()) for specifics.
+
verbose : any, default = False
+
See API documentation at link above or at help(get_report()) for specifics.
+
kwargs : any, default = {}
+
See API documentation at link above or at help(get_report()) for specifics.
+
export_data : function name (not an actual parameter)
+
Export data into pickle format that can be read back in using import_data() so data does not need to
+See API documentation: export_data()
+
hvsr_data : any, default = '<output of previous function>'
+
See API documentation at link above or at help(export_data()) for specifics.
+
hvsr_export_path : any, default = None
+
See API documentation at link above or at help(export_data()) for specifics.
+
ext : any, default = 'hvsr'
+
See API documentation at link above or at help(export_data()) for specifics.
+
verbose : any, default = False
+
See API documentation at link above or at help(export_data()) for specifics.

Returns

@@ -6984,256 +1146,12 @@

Raises

RuntimeError
If the data being processed is a single file, an error will be raised if generate_ppsds() does not work correctly. No errors are raised for remove_noise() errors (since that is an optional step) or for the process_hvsr() step (since that is the last processing step).
-
def run(datapath, source='file', verbose=False, **kwargs):
-    """The sprit.run() is the main function that allows you to do all your HVSR processing in one simple step (sprit.run() is how you would call it in your code, but it may also be called using sprit.sprit_hvsr.run())
-    
-    The datapath parameter of sprit.run() is the only required parameter. This can be either a single file, a list of files (one for each component, for example), a directory (in which case, all obspy-readable files will be added to an HVSRBatch instance), a Rasp. Shake raw data directory, or sample data.
-    
-        The sprit.run() function calls the following functions. This is the recommended order/set of functions to run to process HVSR using SpRIT. See the API documentation for these functions for more information:
-        - input_params(): The datapath parameter of input_params() is the only required variable, though others may also need to be called for your data to process correctly.
-        - fetch_data(): the source parameter of fetch_data() is the only explicit variable in the sprit.run() function aside from datapath and verbose. Everything else gets delivered to the correct function via the kwargs dictionary
-        - remove_noise(): by default, the kind of noise removal is remove_method='auto'. See the remove_noise() documentation for more information. If remove_method is set to anything other than one of the explicit options in remove_noise, noise removal will not be carried out.
-        - generate_ppsds(): generates ppsds for each component, which will be combined/used later. Any parameter of obspy.signal.spectral_estimation.PPSD() may also be read into this function.
-        - remove_outlier_curves(): removes any outlier ppsd curves so that the data quality for when curves are combined will be enhanced. See the remove_outlier_curves() documentation for more information.
-        - process_hvsr(): this is the main function processing the hvsr curve and statistics. See process_hvsr() documentation for more details. The hvsr_band parameter sets the frequency spectrum over which these calculations occur.
-        - check_peaks(): this is the main function that will find and 'score' peaks to get a best peak. The parameter peak_freq_range can be set to limit the frequencies within which peaks are checked and scored.
-        - get_report(): this is the main function that will print, plot, and/or save the results of the data. See the get_report() API documentation for more information.
-        - export_data(): this function exports the final data output as a pickle file (by default, this pickle object has a .hvsr extension). This can be used to read data back into SpRIT without having to reprocess data.
-
-    Parameters
-    ----------
-    datapath : str or filepath object that can be read by obspy
-        Filepath to data to be processed. This may be a file or directory, depending on what kind of data is being processed (this can be specified with the source parameter). 
-        For sample data, The following can be specified as the datapath parameter:
-            - Any integer 1-6 (inclusive), or the string (e.g., datapath="1" or datapath=1 will work)
-            - The word "sample" before any integer (e.g., datapath="sample1")
-            - The word "sample" will default to "sample1" if source='file'. 
-            - If source='batch', datapath should be datapath='sample' or datapath='batch'. In this case, it will read and process all the sample files using the HVSRBatch class. Set verbose=True to see all the information in the sample batch csv file.
-    source : str, optional
-        _description_, by default 'file'
-    verbose : bool, optional
-        _description_, by default False
-    **kwargs
-        Keyword arguments for the functions listed above. The keyword arguments are unique, so they will get parsed out and passed into the appropriate function.
-
-    Returns
-    -------
-    hvsr_results : sprit.HVSRData or sprit.HVSRBatch object
-        If a single file/data point is being processed, a HVSRData object will be returned. Otherwise, it will be a HVSRBatch object. See their documention for more information.
-
-    Raises
-    ------
-    RuntimeError
-        If the input parameter may not be read correctly. This is raised if the input_params() function fails. This raises an error since no other data processing or reading steps will be able to carried out correctly.
-    RuntimeError
-        If the data is not read/fetched correctly using fetch_data(), an error will be raised. This is raised if the fetch_data() function fails. This raises an error since no other data processing steps will be able to carried out correctly.
-    RuntimeError
-        If the data being processed is a single file, an error will be raised if generate_ppsds() does not work correctly. No errors are raised for remove_noise() errors (since that is an optional step) and the process_hvsr() step (since that is the last processing step) .
-    """
-   
-    if 'hvsr_band' not in kwargs.keys():
-        kwargs['hvsr_band'] = inspect.signature(input_params).parameters['hvsr_band'].default
-    if 'peak_freq_range' not in kwargs.keys():
-        kwargs['peak_freq_range'] = inspect.signature(input_params).parameters['peak_freq_range'].default
-
-    #Get the input parameters
-    input_params_kwargs = {k: v for k, v in locals()['kwargs'].items() if k in tuple(inspect.signature(input_params).parameters.keys())}  
-    try:
-        params = input_params(datapath=datapath, verbose=verbose, **input_params_kwargs)
-    except:
-        #Even if batch, this is reading in data for all sites so we want to raise error, not just warn
-        raise RuntimeError('Input parameters not read correctly, see sprit.input_params() function and parameters')
-        #If input_params fails, initialize params as an HVSRDATA
-        params = {'ProcessingStatus':{'InputParamsStatus':False, 'OverallStatus':False}}
-        params.update(input_params_kwargs)
-        params = sprit_utils.make_it_classy(params)
-
-    #Fetch Data
-    try:
-        fetch_data_kwargs = {k: v for k, v in locals()['kwargs'].items() if k in tuple(inspect.signature(fetch_data).parameters.keys())}
-        dataIN = fetch_data(params=params, source=source, verbose=verbose, **fetch_data_kwargs)    
-    except:
-        #Even if batch, this is reading in data for all sites so we want to raise error, not just warn
-        raise RuntimeError('Data not read correctly, see sprit.fetch_data() function and parameters for more details.')
-    
-    # Remove Noise
-    try:
-        remove_noise_kwargs = {k: v for k, v in locals()['kwargs'].items() if k in tuple(inspect.signature(remove_noise).parameters.keys())}
-        data_noiseRemoved = remove_noise(hvsr_data=dataIN, verbose=verbose,**remove_noise_kwargs)   
-    except:
-        data_noiseRemoved = dataIN
-        
-        #Reformat data so HVSRData and HVSRBatch data both work here
-        if isinstance(data_noiseRemoved, HVSRData):
-            data_noiseRemoved = {'place_holder_sitename':data_noiseRemoved}
-            dataIN = {'place_holder_sitename':dataIN}
-            
-        for site_name in data_noiseRemoved.keys():
-            data_noiseRemoved[site_name]['ProcessingStatus']['RemoveNoiseStatus']=False
-            #Since noise removal is not required for data processing, check others first
-            if dataIN[site_name]['ProcessingStatus']['OverallStatus']:
-                data_noiseRemoved[site_name]['ProcessingStatus']['OverallStatus'] = True        
-            else:
-                data_noiseRemoved[site_name]['ProcessingStatus']['OverallStatus'] = False
-
-            #If it wasn't originally HVSRBatch, make it HVSRData object again
-            if not data_noiseRemoved[site_name]['batch']:
-                data_noiseRemoved = data_noiseRemoved[site_name]
-    
-    # Generate PPSDs
-    try:
-        generate_ppsds_kwargs = {k: v for k, v in locals()['kwargs'].items() if k in tuple(inspect.signature(generate_ppsds).parameters.keys())}
-        PPSDkwargs = {k: v for k, v in locals()['kwargs'].items() if k in tuple(inspect.signature(PPSD).parameters.keys())}
-        generate_ppsds_kwargs.update(PPSDkwargs)
-        ppsd_data = generate_ppsds(hvsr_data=data_noiseRemoved, verbose=verbose,**generate_ppsds_kwargs)
-    except Exception as e:
-        if source == 'file' or source=='raw':
-            if hasattr(e, 'message'):
-                errMsg = e.message
-            else:
-                errMsg = e
-            raise RuntimeError(f"generate_ppsds() error: {errMsg}")
-
-        #Reformat data so HVSRData and HVSRBatch data both work here
-        ppsd_data = data_noiseRemoved
-        if isinstance(ppsd_data, HVSRData):
-            ppsd_data = {'place_holder_sitename':ppsd_data}
-            
-        for site_name in ppsd_data.keys(): #This should work more or less the same for batch and regular data now
-            ppsd_data[site_name]['ProcessingStatus']['PPSDStatus']=False
-            ppsd_data[site_name]['ProcessingStatus']['OverallStatus'] = False
-    
-            #If it wasn't originally HVSRBatch, make it HVSRData object again
-            if not ppsd_data[site_name]['batch']:
-                ppsd_data = ppsd_data[site_name]
-    
-    # Remove Outlier Curves
-    try:
-        remove_outlier_curve_kwargs = {k: v for k, v in locals()['kwargs'].items() if k in tuple(inspect.signature(remove_outlier_curves).parameters.keys())}
-        data_curvesRemoved = remove_outlier_curves(hvsr_data=ppsd_data, verbose=verbose,**remove_outlier_curve_kwargs)   
-    except Exception as e:
-        traceback.print_exception(sys.exc_info()[1])
-        exc_type, exc_obj, tb = sys.exc_info()
-        f = tb.tb_frame
-        lineno = tb.tb_lineno
-        filename = f.f_code.co_filename
-        errLineNo = str(traceback.extract_tb(sys.exc_info()[2])[-1].lineno)
-        error_category = type(e).__name__.title().replace('error', 'Error')
-        error_message = f"{e} ({errLineNo})"
-        print(f"{error_category} ({errLineNo}): {error_message}")
-        print(lineno, filename, f)
-        
-        #Reformat data so HVSRData and HVSRBatch data both work here
-        data_curvesRemoved = ppsd_data
-        if isinstance(data_curvesRemoved, HVSRData):
-            data_curvesRemoved = {'place_holder_sitename':data_curvesRemoved}
-            
-        for site_name in data_curvesRemoved.keys(): #This should work more or less the same for batch and regular data now
-            data_curvesRemoved[site_name]['ProcessingStatus']['RemoveOutlierCurvesStatus'] = False
-            data_curvesRemoved[site_name]['ProcessingStatus']['OverallStatus'] = False
-    
-            #If it wasn't originally HVSRBatch, make it HVSRData object again
-            if not data_curvesRemoved[site_name]['batch']:
-                data_curvesRemoved = data_curvesRemoved[site_name]
-    
-    # Process HVSR Curves
-    try:
-        process_hvsr_kwargs = {k: v for k, v in locals()['kwargs'].items() if k in tuple(inspect.signature(process_hvsr).parameters.keys())}
-        hvsr_results = process_hvsr(hvsr_data=ppsd_data, verbose=verbose,**process_hvsr_kwargs)
-    except Exception as e:
-        traceback.print_exception(sys.exc_info()[1])
-        exc_type, exc_obj, tb = sys.exc_info()
-        f = tb.tb_frame
-        lineno = tb.tb_lineno
-        filename = f.f_code.co_filename
-        errLineNo = str(traceback.extract_tb(sys.exc_info()[2])[-1].lineno)
-        error_category = type(e).__name__.title().replace('error', 'Error')
-        error_message = f"{e} ({errLineNo})"
-        print(f"{error_category} ({errLineNo}): {error_message}")
-        print(lineno, filename, f)
-
-        hvsr_results = ppsd_data
-        if isinstance(hvsr_results, HVSRData):
-            hvsr_results = {'place_holder_sitename':hvsr_results}
-            
-        for site_name in hvsr_results.keys(): #This should work more or less the same for batch and regular data now
-        
-            hvsr_results[site_name]['ProcessingStatus']['HVStatus']=False
-            hvsr_results[site_name]['ProcessingStatus']['OverallStatus'] = False
-            
-            #If it wasn't originally HVSRBatch, make it HVSRData object again
-            if not hvsr_results[site_name]['batch']:
-                hvsr_results = hvsr_results[site_name]            
-            
-    #Final post-processing/reporting
-
-    # Check peaks
-    check_peaks_kwargs = {k: v for k, v in locals()['kwargs'].items() if k in tuple(inspect.signature(check_peaks).parameters.keys())}
-    hvsr_results = check_peaks(hvsr_data=hvsr_results, verbose=verbose, **check_peaks_kwargs)
-
-    get_report_kwargs = {k: v for k, v in locals()['kwargs'].items() if k in tuple(inspect.signature(get_report).parameters.keys())}
-    get_report(hvsr_results=hvsr_results, verbose=verbose, **get_report_kwargs)
-
-    if verbose:
-        if 'report_format' in get_report_kwargs.keys():
-            #if report_format is 'print', we would have already printed it in previous step
-            if get_report_kwargs['report_format']=='print' or 'print' in get_report_kwargs['report_format'] or isinstance(hvsr_results, HVSRBatch):
-                #We do not need to print another report if already printed to terminal
-                pass
-            else:
-                #We will just change the report_format kwarg to print, since we already got the originally intended report format above, 
-                #   now need to print for verbose output
-                get_report_kwargs['report_format']='print'
-                get_report(hvsr_results=hvsr_results, **get_report_kwargs)
-                
-            if get_report_kwargs['report_format']=='plot' or 'plot' in get_report_kwargs['report_format']:
-                #We do not need to plot another report if already plotted
-                pass
-            else:
-                #hvplot_kwargs = {k: v for k, v in locals()['kwargs'].items() if k in plot_hvsr.__code__.co_varnames}
-                #hvsr_results['HV_Plot'] = plot_hvsr(hvsr_results, return_fig=True, show=False, close_figs=True)
-                pass
-        else:
-            pass
-    
-    #Export processed data if export_path(as pickle currently, default .hvsr extension)
-    if 'export_path' in kwargs.keys():
-        if kwargs['export_path'] is None:
-            pass
-        else:
-            if 'ext' in kwargs.keys():
-                ext = kwargs['ext']
-            else:
-                ext = 'hvsr'
-            export_data(hvsr_data=hvsr_results, export_path=kwargs['export_path'], ext=ext, verbose=verbose)        
-
-    return hvsr_results
-
def time_it(_t, proc_name='', verbose=True)

Computes elapsed time since the last call.

-
-
-Expand source code
-
-
def time_it(_t, proc_name='', verbose=True):
-    """Computes elapsed time since the last call."""
-    t1 = datetime.datetime.now().time()
-    dt = t1 - _t
-    t = _t
-    if dt > 0.05:
-        if verbose:
-            print(f'[ELAPSED TIME] {dt:0.1f} s', flush=True)
-        t = t1
-    return t
-
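A hedged sketch of the intended call pattern for time_it(), inferred from the source above: the caller threads the returned timestamp through successive calls (the variable and process names here are illustrative).

import datetime

t = datetime.datetime.now().time()
# ... a processing step runs here ...
t = time_it(t, proc_name='processing step', verbose=True)  # prints '[ELAPSED TIME] ...' if enough time has passed; returns the updated timestamp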
def x_mark(incolor=False, inTerminal=False)
@@ -7241,23 +1159,6 @@

Raises

The default Windows terminal is not able to display the check mark character correctly. This function returns another displayable character if the platform is Windows.

-
-
-Expand source code
-
-
def x_mark(incolor=False, inTerminal=False):
-    """The default Windows terminal is not able to display the check mark character correctly.
-       This function returns another displayable character if platform is Windows"""
-    
-    if incolor:
-        try:
-            xmark = get_char(u'\u274C')
-        except:
-            xmark = get_char(u'\u2718')
-    else:
-        xmark = get_char(u'\u2718')
-    return xmark
-
@@ -7269,54 +1170,50 @@

Classes

(*args, **kwargs)
-

HVSRBatch is the data container used for batch processing. It contains several HVSRData objects (one for each site). These can be accessed using their site name, either square brackets (HVSRBatchVariable["SiteName"]) or the dot (HVSRBatchVariable.SiteName) accessor.

+

HVSRBatch is the data container used for batch processing.
+It contains several HVSRData objects (one for each site).
+These can be accessed using their site name,
+either square brackets (HVSRBatchVariable["SiteName"]) or the dot (HVSRBatchVariable.SiteName) accessor.

The dot accessor may not work if there is a space in the site name.

All of the
-functions in the sprit.pacakge are designed to perform the bulk of their operations iteratively on the individual HVSRData objects contained in the HVSRBatch object, and do little with the HVSRBatch object itself, besides using it determine which sites are contained within it.

+functions in the sprit package are designed to perform the bulk of their operations iteratively
+on the individual HVSRData objects contained in the HVSRBatch object, and do little with the HVSRBatch object itself,
+besides using it to determine which sites are contained within it.
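As a quick illustration of the accessors described above (the site name 'SiteName' is hypothetical; the sample-batch call follows the sprit.run() documentation):

import sprit

# Processing the built-in sample batch returns an HVSRBatch object
batch_data = sprit.run(datapath='sample', source='batch')

site_hvsr = batch_data['SiteName']   # square-bracket accessor
site_hvsr = batch_data.SiteName      # dot accessor (may fail if the site name contains a space)

for sitename in batch_data.keys():   # keys() returns the site names
    print(sitename, batch_data[sitename]['ProcessingStatus']['OverallStatus'])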

Expand source code
class HVSRBatch:
-    """HVSRBatch is the data container used for batch processing. It contains several HVSRData objects (one for each site). These can be accessed using their site name, either square brackets (HVSRBatchVariable["SiteName"]) or the dot (HVSRBatchVariable.SiteName) accessor.
+    """HVSRBatch is the data container used for batch processing. 
+    It contains several HVSRData objects (one for each site). 
+    These can be accessed using their site name, 
+    either square brackets (HVSRBatchVariable["SiteName"]) or the dot (HVSRBatchVariable.SiteName) accessor.
     
     The dot accessor may not work if there is a space in the site name.
     
-    All of the  functions in the sprit.pacakge are designed to perform the bulk of their operations iteratively on the individual HVSRData objects contained in the HVSRBatch object, and do little with the HVSRBatch object itself, besides using it determine which sites are contained within it.
+    All of the  functions in the sprit package are designed to perform the bulk of their operations iteratively
+    on the individual HVSRData objects contained in the HVSRBatch object, and do little with the HVSRBatch object itself, 
+    besides using it to determine which sites are contained within it.
     
     """
     @check_instance
-    def __init__(self, batch_dict, azimuth=None):
+    def __init__(self, batch_dict):
         """HVSR Batch initializer
 
         Parameters
         ----------
         batch_dict : dict
-            Dictionary containing Key value pairs with either {sitename:HVSRData object} or {azimuth_angle_degrees:HVSRData object}
-        azimuth : None or numeric, default=None
-            If None, HVSRBatch object will be a batch of sites. If other value, it should be a list of numeric values of the azimuths (in degrees), by default None.
+            Dictionary containing key-value pairs of the form {sitename: HVSRData object}
         """
         self._batch_dict = batch_dict
         self.batch_dict = self._batch_dict
         self.batch = True
-        self.batch_type = 'sites'
-        if azimuth is not None:
-            self.batch_type = 'azimuths'
         
-        if self.batch_type=='sites':
-            for sitename, hvsrdata in batch_dict.items():
-                setattr(self, sitename, hvsrdata)
-                self[sitename]['batch']=True  
-            self.sites = list(self._batch_dict.keys())
-            self.azimuths = azimuth # Should be None
-        elif self.batch_tupe =='azimuths':
-            self.azimuths = azimuth
-            self.sites = []
-            for az, hvsrdata in batch_dict.items():
-                azkey = str(az).zfill(3)
-                setattr(self, azkey, hvsrdata)
-                self[azkey]['batch']=True
-                self.sites.append(hvsrdata['site'])
+        for sitename, hvsrdata in batch_dict.items():
+            setattr(self, sitename, hvsrdata)
+            self[sitename]['batch'] = True  
+        self.sites = list(self._batch_dict.keys())
+
 
     #METHODS
     def __to_json(self, filepath):
@@ -7332,17 +1229,17 @@ 

Classes

        # dump the JSON string to the file
        json.dump(self, f, default=lambda o: o.__dict__, sort_keys=True, indent=4)

-    def export(self, export_path=True, ext='hvsr'):
+    def export(self, hvsr_export_path=True, ext='hvsr'):
        """Method to export HVSRData objects in HVSRBatch container to individual .hvsr pickle files.

        Parameters
        ----------
-        export_path : filepath, default=True
-            Filepath to save file. Can be either directory (which will assign a filename based on the HVSRData attributes). By default True. If True, it will first try to save each file to the same directory as datapath, then if that does not work, to the current working directory, then to the user's home directory, by default True
+        hvsr_export_path : filepath, default=True
+            Filepath to save file. Can be either directory (which will assign a filename based on the HVSRData attributes). By default True. If True, it will first try to save each file to the same directory as input_data, then if that does not work, to the current working directory, then to the user's home directory, by default True
        ext : str, optional
            The extension to use for the output, by default 'hvsr'. This is still a pickle file that can be read with pickle.load(), but will have .hvsr extension.
        """
-        export_data(hvsr_data=self, export_path=export_path, ext=ext)
+        export_data(hvsr_data=self, hvsr_export_path=hvsr_export_path, ext=ext)

    def keys(self):
        """Method to return the "keys" of the HVSRBatch object. For HVSRBatch objects, these are the site names. Functions similar to dict.keys().
@@ -7380,12 +1277,19 @@

Classes

    #Method wrapper of sprit.plot_hvsr function
    def plot(self, **kwargs):
-        """Method to plot data, based on the sprit.plot_hvsr() function. All the same kwargs and default values apply as plot_hvsr(). For return_fig, returns it to the 'Plot_Report' attribute of each HVSRData object
+        """Method to plot data, based on the sprit.plot_hvsr() function.
+
+        All the same kwargs and default values apply as plot_hvsr().
+        For return_fig, returns it to the 'Plot_Report' attribute of each HVSRData object

        Returns
        -------
        _type_
            _description_
+
+        See Also
+        --------
+        plot_hvsr
        """
        for sitename in self:
            if 'return_fig' in kwargs.keys() and kwargs['return_fig']:
@@ -7402,14 +1306,18 @@

Classes

        -------
        Variable
            May return nothing, pandas.Dataframe, or pyplot Figure, depending on input.
+
+        See Also
+        --------
+        get_report
        """
-        if 'report_format' in kwargs.keys():
-            if 'csv' == kwargs['report_format']:
+        if 'report_formats' in kwargs.keys():
+            if 'table' == kwargs['report_formats']:
                for sitename in self:
                    rowList = []
                    rowList.append(get_report(self[sitename], **kwargs))
                return pd.concat(rowList, ignore_index=True)
-            elif 'plot' == kwargs['report_format']:
+            elif 'plot' == kwargs['report_formats']:
                plotDict = {}
                for sitename in self:
                    if 'return_fig' in kwargs.keys() and kwargs['return_fig']:
@@ -7418,17 +1326,25 @@

Classes

                    get_report(self[sitename], **kwargs)
            return plotDict
-        #Only report_format left is print, doesn't return anything, so doesn't matter if defalut or not
+        #Only report_formats left is print, doesn't return anything, so doesn't matter if default or not
        for sitename in self:
            get_report(self[sitename], **kwargs)
        return

    def report(self, **kwargs):
-        """Wrapper of get_report()"""
+        """Wrapper of get_report()
+
+        See Also
+        --------
+        get_report
+        """
        return self.get_report(**kwargs)

    def export_settings(self, site_name=None, export_settings_path='default', export_settings_type='all', include_location=False, verbose=True):
-        """Method to export settings from HVSRData object in HVSRBatch object. Simply calls sprit.export_settings() from specified HVSRData object in the HVSRBatch object. See sprit.export_settings() for more details.
+        """Method to export settings from HVSRData object in HVSRBatch object.
+
+        Simply calls sprit.export_settings() from specified HVSRData object in the HVSRBatch object.
+        See sprit.export_settings() for more details.

        Parameters
        ----------
@@ -7442,6 +1358,11 @@

Classes

            Whether to include the location information in the instrument settings, if that settings type is selected, by default False
        verbose : bool, optional
            Whether to print output (filepath and settings) to terminal, by default True
+
+
+        See Also
+        --------
+        export_settings
        """
        #If no site name selected, use first site
        if site_name is None:
@@ -7471,59 +1392,27 @@

Parameters

type : str {'shallow', 'deep'}
Based on input, creates either a shallow or deep copy of the HVSRBatch object. Shallow is the equivalent of copy.copy(); an input of 'deep' is the equivalent of copy.deepcopy() (still experimental). Defaults to shallow.
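A one-line sketch of each option (batch_data is assumed to be an existing HVSRBatch object):

shallow_copy = batch_data.copy()            # equivalent of copy.copy()
deep_copy = batch_data.copy(type='deep')    # equivalent of copy.deepcopy() (still experimental)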
-
-
-Expand source code
-
-
def copy(self, type='shallow'):
-    """Make a copy of the HVSRBatch object. Uses python copy module.
-    
-    Parameters
-    ----------
-    type : str {'shallow', 'deep'}
-        Based on input, creates either a shallow or deep copy of the HVSRBatch object. Shallow is equivalent of copy.copy(). Input of 'deep' is equivalent of copy.deepcopy() (still experimental). Defaults to shallow.
-
-    """
-    if type.lower()=='deep':
-        return HVSRBatch(copy.deepcopy(self._batch_dict))
-    else:
-        return HVSRBatch(copy.copy(self._batch_dict))
-
-def export(self, export_path=True, ext='hvsr')
+def export(self, hvsr_export_path=True, ext='hvsr')

Method to export HVSRData objects in HVSRBatch container to individual .hvsr pickle files.

Parameters

-
export_path : filepath, default=True
-
Filepath to save file. Can be either directory (which will assign a filename based on the HVSRData attributes). By default True. If True, it will first try to save each file to the same directory as datapath, then if that does not work, to the current working directory, then to the user's home directory, by default True
+
hvsr_export_path : filepath, default=True
+
Filepath to save file. Can be either directory (which will assign a filename based on the HVSRData attributes). By default True. If True, it will first try to save each file to the same directory as input_data, then if that does not work, to the current working directory, then to the user's home directory, by default True
ext : str, optional
The extension to use for the output, by default 'hvsr'. This is still a pickle file that can be read with pickle.load(), but will have .hvsr extension.
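A short sketch of the renamed export call (the output directory is hypothetical; batch_data is an existing HVSRBatch object):

# Export every HVSRData object in the batch to its own .hvsr pickle file
batch_data.export(hvsr_export_path='/path/to/output_dir', ext='hvsr')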
-
-
-Expand source code
-
-
def export(self, export_path=True, ext='hvsr'):
-    """Method to export HVSRData objects in HVSRBatch container to indivdual .hvsr pickle files.
-
-    Parameters
-    ----------
-    export_path : filepath, default=True
-        Filepath to save file. Can be either directory (which will assign a filename based on the HVSRData attributes). By default True. If True, it will first try to save each file to the same directory as datapath, then if that does not work, to the current working directory, then to the user's home directory, by default True
-    ext : str, optional
-        The extension to use for the output, by default 'hvsr'. This is still a pickle file that can be read with pickle.load(), but will have .hvsr extension.
-    """
-    export_data(hvsr_data=self, export_path=export_path, ext=ext)
-
def export_settings(self, site_name=None, export_settings_path='default', export_settings_type='all', include_location=False, verbose=True)
-

Method to export settings from HVSRData object in HVSRBatch object. Simply calls sprit.export_settings() from specified HVSRData object in the HVSRBatch object. See sprit.export_settings() for more details.

+

Method to export settings from HVSRData object in HVSRBatch object.

+

Simply calls sprit.export_settings() from specified HVSRData object in the HVSRBatch object.
+See sprit.export_settings() for more details.

Parameters

site_name : str, default=None
@@ -7536,34 +1425,9 @@

Parameters

Whether to include the location information in the instrument settings, if that settings type is selected, by default False
verbose : bool, optional
Whether to print output (filepath and settings) to terminal, by default True
-
-
-
-Expand source code
-
-
def export_settings(self, site_name=None, export_settings_path='default', export_settings_type='all', include_location=False, verbose=True):
-    """Method to export settings from HVSRData object in HVSRBatch object. Simply calls sprit.export_settings() from specified HVSRData object in the HVSRBatch object. See sprit.export_settings() for more details.
-
-    Parameters
-    ----------
-    site_name : str, default=None
-        The name of the site whose settings should be exported. If None, will default to the first site, by default None.
-    export_settings_path : str, optional
-        Filepath to output file. If left as 'default', will save as the default value in the resources directory. If that is not possible, will save to home directory, by default 'default'
-    export_settings_type : str, {'all', 'instrument', 'processing'}, optional
-        They type of settings to save, by default 'all'
-    include_location : bool, optional
-        Whether to include the location information in the instrument settings, if that settings type is selected, by default False
-    verbose : bool, optional
-        Whether to print output (filepath and settings) to terminal, by default True
-    """
-    #If no site name selected, use first site
-    if site_name is None:
-        site_name = self.sites[0]
-        
-    export_settings(hvsr_data=self[site_name], 
-                    export_settings_path=export_settings_path, export_settings_type=export_settings_type, include_location=include_location, verbose=verbose)
-
+
+

See Also

+

export_settings()
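A minimal sketch of the method, using the defaults documented above (no site_name means the first site in the batch is used):

# Export only the processing settings of the first site in the batch
batch_data.export_settings(export_settings_type='processing', verbose=True)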

def get_report(self, **kwargs)
@@ -7574,39 +1438,9 @@

Returns

Variable
May return nothing, pandas.Dataframe, or pyplot Figure, depending on input.
-
-
-
-Expand source code
-
-
def get_report(self, **kwargs):
-    """Method to get report from processed data, in print, graphical, or tabular format.
-
-    Returns
-    -------
-    Variable
-        May return nothing, pandas.Dataframe, or pyplot Figure, depending on input.
-    """
-    if 'report_format' in kwargs.keys():
-        if 'csv' == kwargs['report_format']:
-            for sitename in self:
-                rowList = []
-                rowList.append(get_report(self[sitename], **kwargs))
-            return pd.concat(rowList, ignore_index=True)
-        elif 'plot' == kwargs['report_format']:
-            plotDict = {}
-            for sitename in self:
-                if 'return_fig' in kwargs.keys() and kwargs['return_fig']:
-                    plotDict[sitename] = get_report(self[sitename], **kwargs)
-                else:
-                    get_report(self[sitename], **kwargs)
-            return plotDict
-        
-    #Only report_format left is print, doesn't return anything, so doesn't matter if defalut or not
-    for sitename in self:
-        get_report(self[sitename], **kwargs)
-    return
-
+
+

See Also

+

get_report()
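Based on the method body shown above, a hedged sketch of the two returning modes:

# 'table' returns one pandas.DataFrame concatenated across sites
df = batch_data.get_report(report_formats='table')

# 'plot' with return_fig=True returns a dict of figures keyed by site name
figs = batch_data.get_report(report_formats='plot', return_fig=True)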

def items(self)
@@ -7618,20 +1452,6 @@

Returns

_type_
description
-
-
-Expand source code
-
-
def items(self):
-    """Method to return both the site names and the HVSRData object as a set of dict_items tuples. Functions similar to dict.items().
-
-    Returns
-    -------
-    _type_
-        _description_
-    """
-    return self.batch_dict.items()
-
def keys(self)
@@ -7643,65 +1463,29 @@

Returns

dict_keys
A dict_keys object listing the site names of each of the HVSRData objects contained in the HVSRBatch object
-
-
-Expand source code
-
-
def keys(self):
-    """Method to return the "keys" of the HVSRBatch object. For HVSRBatch objects, these are the site names. Functions similar to dict.keys().
-
-    Returns
-    -------
-    dict_keys
-        A dict_keys object listing the site names of each of the HVSRData objects contained in the HVSRBatch object
-    """
-    return self.batch_dict.keys()
-
def plot(self, **kwargs)
-

Method to plot data, based on the sprit.plot_hvsr() function. All the same kwargs and default values apply as plot_hvsr(). For return_fig, returns it to the 'Plot_Report' attribute of each HVSRData object

+

Method to plot data, based on the sprit.plot_hvsr() function.

+

All the same kwargs and default values apply as plot_hvsr().
+For return_fig, returns it to the 'Plot_Report' attribute of each HVSRData object

Returns

_type_
description
-
-
-
-Expand source code
-
-
def plot(self, **kwargs):
-    """Method to plot data, based on the sprit.plot_hvsr() function. All the same kwargs and default values apply as plot_hvsr(). For return_fig, returns it to the 'Plot_Report' attribute of each HVSRData object
-
-    Returns
-    -------
-    _type_
-        _description_
-    """
-    for sitename in self:
-        if 'return_fig' in kwargs.keys() and kwargs['return_fig']:
-            self[sitename]['Plot_Report'] = plot_hvsr(self[sitename], **kwargs)
-        else:
-            plot_hvsr(self[sitename], **kwargs)
-
-    return self
-
+
+

See Also

+

plot_hvsr()
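A brief sketch of the batch plot call; per the method body above, return_fig=True stores each figure on the site's 'Plot_Report' attribute ('SiteName' is hypothetical):

batch_data.plot(return_fig=True)
fig = batch_data['SiteName']['Plot_Report']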

def report(self, **kwargs)
-

Wrapper of get_report()

-
-
-Expand source code
-
-
def report(self, **kwargs):
-    """Wrapper of get_report()"""
-    return self.get_report(**kwargs)
-
+

Wrapper of get_report()

+

See Also

+

get_report()

@@ -7710,7 +1494,8 @@

Returns

(*args, **kwargs)
-

HVSRData is the basic data class of the sprit package. It contains all the processed data, input parameters, and reports.

+

HVSRData is the basic data class of the sprit package.
+It contains all the processed data, input parameters, and reports.

These attributes and objects can be accessed using square brackets or the dot accessor. For example, to access the site name, HVSRData['site'] and HVSRData.site will both return the site name.

Some of the methods that work on the HVSRData object (e.g., .plot() and .get_report()) are essentially wrappers for some of the main sprit package functions (sprit.plot_hvsr() and sprit.get_report(), respectively)
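For example (hvsr_data is assumed to be an existing HVSRData object, e.g. the output of sprit.run() for a single site):

print(hvsr_data['site'])   # square-bracket access
print(hvsr_data.site)      # equivalent dot access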

@@ -7718,18 +1503,13 @@

Returns

Expand source code
class HVSRData:
-    """HVSRData is the basic data class of the sprit package. It contains all the processed data, input parameters, and reports.
+    """HVSRData is the basic data class of the sprit package. 
+    It contains all the processed data, input parameters, and reports.
     
     These attributes and objects can be accessed using square brackets or the dot accessor. For example, to access the site name, HVSRData['site'] and HVSRData.site will both return the site name.
     
     Some of the methods that work on the HVSRData object (e.g., .plot() and .get_report()) are essentially wrappers for some of the main sprit package functions (sprit.plot_hvsr() and sprit.get_report(), respectively)
     """
-    #Old way of using this
-    #def __new__(cls, params):
-    #    if isinstance(params, (cls, HVSRBatch)):
-    #        return params
-    #    return super().__new__(cls)
-
     @check_instance    
     def __init__(self, params):
         self.params = params
@@ -7765,21 +1545,21 @@ 

Returns

        # dump the JSON string to the file
        json.dump(self, f, default=unseriable_fun, sort_keys=True, indent=4)

-    def export(self, export_path=None, ext='hvsr'):
+    def export(self, hvsr_export_path=None, ext='hvsr'):
        """Method to export HVSRData objects to .hvsr pickle files.

        Parameters
        ----------
-        export_path : filepath, default=True
+        hvsr_export_path : filepath, default=True
            Filepath to save file. Can be either directory (which will assign a filename based on the HVSRData attributes). By default True.
-            If True, it will first try to save each file to the same directory as datapath, then if that does not work, to the current working directory, then to the user's home directory, by default True
+            If True, it will first try to save each file to the same directory as input_data, then if that does not work, to the current working directory, then to the user's home directory, by default True
        ext : str, optional
            The extension to use for the output, by default 'hvsr'. This is still a pickle file that can be read with pickle.load(), but will have .hvsr extension.
        """
-        export_data(hvsr_data=self, export_path=export_path, ext=ext)
+        export_data(hvsr_data=self, hvsr_export_path=hvsr_export_path, ext=ext)

-    #METHODS (many reflect dictionary methods)
+    # METHODS (many reflect dictionary methods)
    def keys(self):
        """Method to return the "keys" of the HVSRData object. For HVSRData objects, these are the attributes and parameters of the object. Functions similar to dict.keys().
@@ -7824,6 +1604,11 @@

Returns

        Returns
        -------
        matplotlib.Figure, matplotlib.Axis (if return_fig=True)
+
+        See Also
+        --------
+        plot_hvsr
+        plot_azimuth
        """
        if 'close_figs' not in kwargs.keys():
            kwargs['close_figs']=True
@@ -7838,12 +1623,21 @@

Returns

        -------
        Variable
            May return nothing, pandas.Dataframe, or pyplot Figure, depending on input.
+
+        See Also
+        --------
+        get_report
        """
        report_return = get_report(self, **kwargs)
        return report_return

    def report(self, **kwargs):
-        """Wrapper of get_report()"""
+        """Wrapper of get_report()
+
+        See Also
+        --------
+        get_report
+        """
        report_return = get_report(self, **kwargs)
        return report_return
@@ -7963,7 +1757,7 @@

Returns

Instance variables

-
var batch
+
prop batch

Whether this HVSRData object is part of an HVSRBatch object. This is used throughout the code to help direct the object into the proper processing pipeline.

Returns

@@ -7987,7 +1781,7 @@

Returns

return self._batch
-
var datastream
+
prop datastream

A copy of the original obspy datastream read in. This helps to retain the original data even after processing is carried out.

Returns

@@ -8011,7 +1805,7 @@

Returns

return self._datastream
-
var params
+
prop params

Dictionary containing the parameters used to process the data

Returns

@@ -8035,7 +1829,7 @@

Returns

return self._params
-
var ppsds
+
prop ppsds

Dictionary copy of the class object obspy.signal.spectral_estimation.PPSD(). The dictionary copy allows manipulation of the data in PPSD, whereas that data cannot be easily manipulated in the original Obspy object.

Returns

@@ -8059,7 +1853,7 @@

Returns

return self._ppsds
-
var ppsds_obspy
+
prop ppsds_obspy

The original ppsd information from the obspy.signal.spectral_estimation.PPSD(), so as to keep original if copy is manipulated/changed.
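A short sketch contrasting the two attributes described above:

ppsd_dict = hvsr_data.ppsds          # dictionary copy, safe to manipulate
ppsd_orig = hvsr_data.ppsds_obspy    # original obspy PPSD information, kept unmodified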

@@ -8085,57 +1879,21 @@

Parameters

type : str {'shallow', 'deep'}
Based on input, creates either a shallow or deep copy of the HVSRData object. Shallow is the equivalent of copy.copy(); an input of type='deep' is the equivalent of copy.deepcopy() (still experimental). Defaults to shallow.
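As with HVSRBatch.copy(), a one-line sketch of each option:

hvsr_shallow = hvsr_data.copy()             # equivalent of copy.copy()
hvsr_deep = hvsr_data.copy(type='deep')     # equivalent of copy.deepcopy() (still experimental)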
-
-
-Expand source code
-
-
def copy(self, type='shallow'):
-    """Make a copy of the HVSRData object. Uses python copy module.
-    
-    Parameters
-    ----------
-    type : str {'shallow', 'deep'}
-        Based on input, creates either a shallow or deep copy of the HVSRData object. Shallow is equivalent of copy.copy(). Input of type='deep' is equivalent of copy.deepcopy() (still experimental). Defaults to shallow.
-
-    """
-    if type.lower()=='deep':
-        return HVSRData(copy.deepcopy(self.params))
-    else:
-        return HVSRData(copy.copy(self.params))
-
-def export(self, export_path=None, ext='hvsr')
+def export(self, hvsr_export_path=None, ext='hvsr')

Method to export HVSRData objects to .hvsr pickle files.

Parameters

-
export_path : filepath, default=True
+
hvsr_export_path : filepath, default=True
Filepath to save file. Can be either directory (which will assign a filename based on the HVSRData attributes). By default True.
-If True, it will first try to save each file to the same directory as datapath, then if that does not work, to the current working directory, then to the user's home directory, by default True
+If True, it will first try to save each file to the same directory as input_data, then if that does not work, to the current working directory, then to the user's home directory, by default True
ext : str, optional
The extension to use for the output, by default 'hvsr'. This is still a pickle file that can be read with pickle.load(), but will have .hvsr extension.
-
-
-Expand source code
-
-
def export(self, export_path=None, ext='hvsr'):
-    """Method to export HVSRData objects to .hvsr pickle files.
-
-    Parameters
-    ----------
-    export_path : filepath, default=True
-        Filepath to save file. Can be either directory (which will assign a filename based on the HVSRData attributes). 
-        By default True. 
-        If True, it will first try to save each file to the same directory as datapath, then if that does not work, to the current working directory, then to the user's home directory, by default True
-    ext : str, optional
-        The extension to use for the output, by default 'hvsr'. This is still a pickle file that can be read with pickle.load(), but will have .hvsr extension.
-    """
-    export_data(hvsr_data=self, export_path=export_path, ext=ext)
-
def export_settings(self, export_settings_path='default', export_settings_type='all', include_location=False, verbose=True)
@@ -8153,27 +1911,6 @@

Parameters

verbose : bool, optional
Whether to print output (filepath and settings) to terminal, by default True
-
-
-Expand source code
-
-
def export_settings(self, export_settings_path='default', export_settings_type='all', include_location=False, verbose=True):
-    """Method to export settings from HVSRData object. Simply calls sprit.export_settings() from the HVSRData object. See sprit.export_settings() for more details.
-
-    Parameters
-    ----------
-    export_settings_path : str, optional
-        Filepath to output file. If left as 'default', will save as the default value in the resources directory. If that is not possible, will save to home directory, by default 'default'
-    export_settings_type : str, {'all', 'instrument', 'processing'}, optional
-        They type of settings to save, by default 'all'
-    include_location : bool, optional
-        Whether to include the location information in the instrument settings, if that settings type is selected, by default False
-    verbose : bool, optional
-        Whether to print output (filepath and settings) to terminal, by default True
-    """
-    export_settings(hvsr_data=self, 
-                    export_settings_path=export_settings_path, export_settings_type=export_settings_type, include_location=include_location, verbose=verbose)
-
def get_report(self, **kwargs)
@@ -8184,22 +1921,9 @@

Returns

Variable
May return nothing, pandas.Dataframe, or pyplot Figure, depending on input.
-
-
-
-Expand source code
-
-
def get_report(self, **kwargs):
-    """Method to get report from processed data, in print, graphical, or tabular format.
-
-    Returns
-    -------
-    Variable
-        May return nothing, pandas.Dataframe, or pyplot Figure, depending on input.
-    """
-    report_return = get_report(self, **kwargs)
-    return report_return
-
+
+

See Also

+

get_report()

def items(self)
@@ -8211,20 +1935,6 @@

Returns

dict_items
A dict_items object of the HVSRData objects attributes, parameters, etc.
-
-
-Expand source code
-
-
def items(self):
-    """Method to return the "items" of the HVSRData object. For HVSRData objects, this is a dict_items object with the keys and values in tuples. Functions similar to dict.items().
-
-    Returns
-    -------
-    dict_items
-        A dict_items object of the HVSRData objects attributes, parameters, etc.
-    """                
-    return self.params.items()
-
def keys(self)
@@ -8236,24 +1946,6 @@

Returns

dict_keys
A dict_keys object of the HVSRData objects attributes, parameters, etc.
-
-
-Expand source code
-
-
def keys(self):
-    """Method to return the "keys" of the HVSRData object. For HVSRData objects, these are the attributes and parameters of the object. Functions similar to dict.keys().
-
-    Returns
-    -------
-    dict_keys
-        A dict_keys object of the HVSRData objects attributes, parameters, etc.
-    """        
-    keyList = []
-    for k in dir(self):
-        if not k.startswith('_'):
-            keyList.append(k)
-    return keyList
-
def plot(self, **kwargs)
@@ -8264,39 +1956,18 @@

Returns

matplotlib.Figure, matplotlib.Axis (if return_fig=True)
 
-
-
-
-Expand source code
-
-
def plot(self, **kwargs):
-    """Method to plot data, wrapper of sprit.plot_hvsr()
-
-    Returns
-    -------
-    matplotlib.Figure, matplotlib.Axis (if return_fig=True)
-    """
-    if 'close_figs' not in kwargs.keys():
-        kwargs['close_figs']=True
-    plot_return = plot_hvsr(self, **kwargs)
-    plt.show()
-    return plot_return
-
+
+

See Also

+

plot_hvsr()
+plot_azimuth()
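A minimal sketch; per the Returns note above, the figure/axis output is only produced when return_fig=True:

plot_output = hvsr_data.plot(return_fig=True)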

def report(self, **kwargs)
-

Wrapper of get_report()

-
-
-Expand source code
-
-
def report(self, **kwargs):
-    """Wrapper of get_report()"""
-    report_return = get_report(self, **kwargs)
-    return report_return
-
+

Wrapper of get_report()

+

See Also

+

get_report()

@@ -8304,7 +1975,6 @@

Returns