Index
- -A
-- |
B
-- | - |
C
-D
-- |
E
-- | - |
F
-
|
-
|
-
G
-H
-- |
|
-
I
-- | - |
K
-- |
L
-- |
M
-
|
-
O
-- | - |
P
-R
-- | - |
S
-
|
-
|
-
T
-- |
|
-
W
-- |
X
-
|
-
From 9ac1825d23741b1923dda64d58db9624a9f86989 Mon Sep 17 00:00:00 2001 From: RJbalikian <46536937+RJbalikian@users.noreply.github.com> Date: Wed, 30 Oct 2024 11:55:10 -0500 Subject: [PATCH] update docs (including github pages?) --- docs/.readthedocs.yaml | 33 - docs/Makefile | 20 - docs/_build/html/.buildinfo | 4 - docs/_build/html/genindex.html | 759 - docs/_build/html/index.html | 356 - docs/_build/html/objects.inv | Bin 1478 -> 0 bytes docs/_build/html/py-modindex.html | 157 - docs/_build/html/searchindex.js | 1 - docs/_build/html/sprit.html | 1986 --- docs/_build/html/sprit.sprit_cli.html | 183 - docs/_build/html/sprit.sprit_hvsr.html | 1869 --- docs/_build/html/sprit.sprit_jupyter_UI.html | 180 - docs/_build/html/sprit.sprit_plot.html | 257 - .../_build/html/sprit.sprit_streamlit_ui.html | 209 - docs/_build/html/sprit.sprit_tkinter_ui.html | 237 - docs/_build/html/sprit.sprit_utils.html | 303 - docs/_generate_docs.py | 2 +- .../html/search.html => _modules/index.html} | 71 +- docs/_modules/sprit/sprit_cli.html | 224 + docs/_modules/sprit/sprit_hvsr.html | 10021 +++++++++++++ docs/_modules/sprit/sprit_jupyter_UI.html | 2393 +++ docs/_modules/sprit/sprit_plot.html | 1301 ++ docs/_modules/sprit/sprit_streamlit_ui.html | 694 + docs/_modules/sprit/sprit_tkinter_ui.html | 3250 ++++ docs/_modules/sprit/sprit_utils.html | 671 + docs/{index.rst => _sources/index.rst.txt} | 0 docs/{sprit.rst => _sources/sprit.rst.txt} | 0 .../sprit.sprit_cli.rst.txt} | 0 .../sprit.sprit_hvsr.rst.txt} | 0 .../sprit.sprit_jupyter_UI.rst.txt} | 0 .../sprit.sprit_plot.rst.txt} | 0 .../sprit.sprit_streamlit_ui.rst.txt} | 0 .../sprit.sprit_tkinter_ui.rst.txt} | 0 .../sprit.sprit_utils.rst.txt} | 0 docs/{_build/doctrees => }/environment.pickle | Bin 997846 -> 997846 bytes docs/genindex.html | 14 +- docs/{_build/doctrees => }/index.doctree | Bin docs/index.html | 14 +- docs/main.html | 7680 +--------- docs/make.bat | 35 - docs/py-modindex.html | 16 +- docs/search.html | 18 +- 
docs/{_build/doctrees => }/sprit.doctree | Bin docs/sprit.html | 14 +- docs/sprit.ptyhonfile.html | 106 - .../doctrees => }/sprit.sprit_cli.doctree | Bin docs/sprit.sprit_cli.html | 14 +- .../doctrees => }/sprit.sprit_hvsr.doctree | Bin docs/sprit.sprit_hvsr.html | 14 +- .../sprit.sprit_jupyter_UI.doctree | Bin docs/sprit.sprit_jupyter_UI.html | 14 +- .../doctrees => }/sprit.sprit_plot.doctree | Bin docs/sprit.sprit_plot.html | 14 +- .../sprit.sprit_streamlit_ui.doctree | Bin docs/sprit.sprit_streamlit_ui.html | 14 +- .../sprit.sprit_tkinter_ui.doctree | Bin docs/sprit.sprit_tkinter_ui.html | 14 +- .../doctrees => }/sprit.sprit_utils.doctree | Bin docs/sprit.sprit_utils.html | 14 +- docs/sprit_cli.html | 254 +- docs/sprit_gui.html | 9234 ------------ docs/sprit_hvsr.html | 12266 +--------------- docs/sprit_jupyter_UI.html | 4539 +----- docs/sprit_plot.html | 167 + docs/sprit_streamlit_ui.html | 111 + docs/sprit_tkinter_ui.html | 3075 ++++ docs/sprit_utils.html | 918 +- docs/theme.css | 4 - docs/theme.js | 1 - sprit/ui_tests/RemiTest.py | 44 - sprit/ui_tests/__init__,py | 8 - sprit/ui_tests/remiOverview.py | 314 - sprit/ui_tests/sprit_nice.py | 45 - sprit/ui_tests/sprit_remi.py | 307 - 74 files changed, 23384 insertions(+), 41079 deletions(-) delete mode 100644 docs/.readthedocs.yaml delete mode 100644 docs/Makefile delete mode 100644 docs/_build/html/.buildinfo delete mode 100644 docs/_build/html/genindex.html delete mode 100644 docs/_build/html/index.html delete mode 100644 docs/_build/html/objects.inv delete mode 100644 docs/_build/html/py-modindex.html delete mode 100644 docs/_build/html/searchindex.js delete mode 100644 docs/_build/html/sprit.html delete mode 100644 docs/_build/html/sprit.sprit_cli.html delete mode 100644 docs/_build/html/sprit.sprit_hvsr.html delete mode 100644 docs/_build/html/sprit.sprit_jupyter_UI.html delete mode 100644 docs/_build/html/sprit.sprit_plot.html delete mode 100644 docs/_build/html/sprit.sprit_streamlit_ui.html delete mode 
100644 docs/_build/html/sprit.sprit_tkinter_ui.html delete mode 100644 docs/_build/html/sprit.sprit_utils.html rename docs/{_build/html/search.html => _modules/index.html} (51%) create mode 100644 docs/_modules/sprit/sprit_cli.html create mode 100644 docs/_modules/sprit/sprit_hvsr.html create mode 100644 docs/_modules/sprit/sprit_jupyter_UI.html create mode 100644 docs/_modules/sprit/sprit_plot.html create mode 100644 docs/_modules/sprit/sprit_streamlit_ui.html create mode 100644 docs/_modules/sprit/sprit_tkinter_ui.html create mode 100644 docs/_modules/sprit/sprit_utils.html rename docs/{index.rst => _sources/index.rst.txt} (100%) rename docs/{sprit.rst => _sources/sprit.rst.txt} (100%) rename docs/{sprit.sprit_cli.rst => _sources/sprit.sprit_cli.rst.txt} (100%) rename docs/{sprit.sprit_hvsr.rst => _sources/sprit.sprit_hvsr.rst.txt} (100%) rename docs/{sprit.sprit_jupyter_UI.rst => _sources/sprit.sprit_jupyter_UI.rst.txt} (100%) rename docs/{sprit.sprit_plot.rst => _sources/sprit.sprit_plot.rst.txt} (100%) rename docs/{sprit.sprit_streamlit_ui.rst => _sources/sprit.sprit_streamlit_ui.rst.txt} (100%) rename docs/{sprit.sprit_tkinter_ui.rst => _sources/sprit.sprit_tkinter_ui.rst.txt} (100%) rename docs/{sprit.sprit_utils.rst => _sources/sprit.sprit_utils.rst.txt} (100%) rename docs/{_build/doctrees => }/environment.pickle (97%) rename docs/{_build/doctrees => }/index.doctree (100%) delete mode 100644 docs/make.bat rename docs/{_build/doctrees => }/sprit.doctree (100%) delete mode 100644 docs/sprit.ptyhonfile.html rename docs/{_build/doctrees => }/sprit.sprit_cli.doctree (100%) rename docs/{_build/doctrees => }/sprit.sprit_hvsr.doctree (100%) rename docs/{_build/doctrees => }/sprit.sprit_jupyter_UI.doctree (100%) rename docs/{_build/doctrees => }/sprit.sprit_plot.doctree (100%) rename docs/{_build/doctrees => }/sprit.sprit_streamlit_ui.doctree (100%) rename docs/{_build/doctrees => }/sprit.sprit_tkinter_ui.doctree (100%) rename docs/{_build/doctrees => 
}/sprit.sprit_utils.doctree (100%) delete mode 100644 docs/sprit_gui.html create mode 100644 docs/sprit_plot.html create mode 100644 docs/sprit_streamlit_ui.html create mode 100644 docs/sprit_tkinter_ui.html delete mode 100644 docs/theme.css delete mode 100644 docs/theme.js delete mode 100644 sprit/ui_tests/RemiTest.py delete mode 100644 sprit/ui_tests/__init__,py delete mode 100644 sprit/ui_tests/remiOverview.py delete mode 100644 sprit/ui_tests/sprit_nice.py delete mode 100644 sprit/ui_tests/sprit_remi.py diff --git a/docs/.readthedocs.yaml b/docs/.readthedocs.yaml deleted file mode 100644 index c26d98f5..00000000 --- a/docs/.readthedocs.yaml +++ /dev/null @@ -1,33 +0,0 @@ -# .readthedocs.yaml -# Read the Docs configuration file -# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details - -# Required -version: 2 - -# Set the OS, Python version and other tools you might need -build: - os: ubuntu-22.04 - tools: - python: "3.12" - # You can also specify other tool versions: - # nodejs: "19" - # rust: "1.64" - # golang: "1.19" - -# Build documentation in the "docs/" directory with Sphinx -sphinx: - configuration: docs/conf.py - builder: html - -# Optionally build your docs in additional formats such as PDF and ePub -formats: - - pdf -# - epub - -# Optional but recommended, declare the Python requirements required -# to build your documentation -# See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html -python: - install: - - requirements: docs/requirements.txt \ No newline at end of file diff --git a/docs/Makefile b/docs/Makefile deleted file mode 100644 index d4bb2cbb..00000000 --- a/docs/Makefile +++ /dev/null @@ -1,20 +0,0 @@ -# Minimal makefile for Sphinx documentation -# - -# You can set these variables from the command line, and also -# from the environment for the first two. -SPHINXOPTS ?= -SPHINXBUILD ?= sphinx-build -SOURCEDIR = . -BUILDDIR = _build - -# Put it first so that "make" without argument is like "make help". 
-help: - @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) - -.PHONY: help Makefile - -# Catch-all target: route all unknown targets to Sphinx using the new -# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). -%: Makefile - @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/_build/html/.buildinfo b/docs/_build/html/.buildinfo deleted file mode 100644 index 52fb83ee..00000000 --- a/docs/_build/html/.buildinfo +++ /dev/null @@ -1,4 +0,0 @@ -# Sphinx build info version 1 -# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. -config: 2f3f51ea76c87fa1efbf5ede4657fbc2 -tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/docs/_build/html/genindex.html b/docs/_build/html/genindex.html deleted file mode 100644 index 23c7f332..00000000 --- a/docs/_build/html/genindex.html +++ /dev/null @@ -1,759 +0,0 @@ - - -
- - -- |
- | - |
- |
- | - |
|
-
|
-
- |
|
-
- | - |
- |
- |
|
-
- | - |
- | - |
|
-
|
-
- |
|
-
- |
|
-
HVSRBatch
-HVSRData
-assert_check()
batch_data_read()
calculate_azimuth()
catch_errors()
check_gui_requirements()
check_mark()
check_peaks()
check_tsteps()
check_xvalues()
checkifpath()
create_jupyter_ui()
export_data()
export_settings()
fetch_data()
format_time()
generate_ppsds()
get_char()
get_metadata()
get_report()
gui()
has_required_channels()
import_data()
import_settings()
input_params()
make_it_classy()
parse_plot_string()
plot_azimuth()
plot_hvsr()
plot_outlier_curves()
plot_preview()
plot_results()
process_hvsr()
read_from_RS()
read_tromino_files()
remove_noise()
remove_outlier_curves()
run()
time_it()
x_mark()
HVSRBatch
HVSRData
batch_data_read()
calculate_azimuth()
check_instance()
check_peaks()
export_data()
export_report()
export_settings()
fetch_data()
generate_ppsds()
get_metadata()
get_report()
gui()
gui_test()
import_data()
import_settings()
input_params()
plot_azimuth()
plot_hvsr()
plot_stream()
process_hvsr()
read_tromino_files()
remove_noise()
remove_outlier_curves()
run()
test_function()
get_check_peaks_kwargs()
get_generate_ppsd_kwargs()
get_get_report_kwargs()
get_process_hvsr_kwargs()
get_remove_noise_kwargs()
get_remove_outlier_curve_kwargs()
parse_comp_plot_list()
parse_hv_plot_list()
parse_plot_string()
parse_spec_plot_list()
plot_outlier_curves()
plot_preview()
plot_results()
process_data()
read_data()
HVSRBatch
-HVSRData
-batch_data_read()
calculate_azimuth()
check_instance()
check_peaks()
export_data()
export_report()
export_settings()
fetch_data()
generate_ppsds()
get_metadata()
get_report()
gui()
gui_test()
import_data()
import_settings()
input_params()
plot_azimuth()
plot_hvsr()
plot_stream()
process_hvsr()
read_tromino_files()
remove_noise()
remove_outlier_curves()
run()
test_function()
- s | ||
- |
- sprit | - |
- |
- sprit.sprit_cli | - |
- |
- sprit.sprit_hvsr | - |
- |
- sprit.sprit_jupyter_UI | - |
- |
- sprit.sprit_plot | - |
- |
- sprit.sprit_streamlit_ui | - |
- |
- sprit.sprit_tkinter_ui | - |
- |
- sprit.sprit_utils | - |
This module analysis ambient seismic data using the Horizontal to Vertical Spectral Ratio (HVSR) technique
-Bases: object
HVSRBatch is the data container used for batch processing. -It contains several HVSRData objects (one for each site). -These can be accessed using their site name, -either square brackets (HVSRBatchVariable[“SiteName”]) or the dot (HVSRBatchVariable.SiteName) accessor.
-The dot accessor may not work if there is a space in the site name.
-All of the functions in the sprit package are designed to perform the bulk of their operations iteratively -on the individual HVSRData objects contained in the HVSRBatch object, and do little with the HVSRBatch object itself, -besides using it determine which sites are contained within it.
-Methods
-
|
-Make a copy of the HVSRBatch object. |
-
|
-Method to export HVSRData objects in HVSRBatch container to indivdual .hvsr pickle files. |
-
|
-Method to export settings from HVSRData object in HVSRBatch object. |
-
|
-Method to get report from processed data, in print, graphical, or tabular format. |
-
|
-Method to return both the site names and the HVSRData object as a set of dict_items tuples. |
-
|
-Method to return the "keys" of the HVSRBatch object. |
-
|
-Method to plot data, based on the sprit.plot_hvsr() function. |
-
|
-Wrapper of get_report() |
-
Make a copy of the HVSRBatch object. Uses python copy module.
-Based on input, creates either a shallow or deep copy of the HVSRBatch object. Shallow is equivalent of copy.copy(). Input of ‘deep’ is equivalent of copy.deepcopy() (still experimental). Defaults to shallow.
-Method to export HVSRData objects in HVSRBatch container to indivdual .hvsr pickle files.
-Filepath to save file. Can be either directory (which will assign a filename based on the HVSRData attributes). By default True. If True, it will first try to save each file to the same directory as input_data, then if that does not work, to the current working directory, then to the user’s home directory, by default True
-The extension to use for the output, by default ‘hvsr’. This is still a pickle file that can be read with pickle.load(), but will have .hvsr extension.
-Method to export settings from HVSRData object in HVSRBatch object.
-Simply calls sprit.export_settings() from specified HVSRData object in the HVSRBatch object. -See sprit.export_settings() for more details.
-The name of the site whose settings should be exported. If None, will default to the first site, by default None.
-Filepath to output file. If left as ‘default’, will save as the default value in the resources directory. If that is not possible, will save to home directory, by default ‘default’
-They type of settings to save, by default ‘all’
-Whether to include the location information in the instrument settings, if that settings type is selected, by default False
-Whether to print output (filepath and settings) to terminal, by default True
-See also
- -Method to get report from processed data, in print, graphical, or tabular format.
-May return nothing, pandas.Dataframe, or pyplot Figure, depending on input.
-See also
-get_report
Method to return both the site names and the HVSRData object as a set of dict_items tuples. Functions similar to dict.items().
-_description_
-Method to return the “keys” of the HVSRBatch object. For HVSRBatch objects, these are the site names. Functions similar to dict.keys().
-A dict_keys object listing the site names of each of the HVSRData objects contained in the HVSRBatch object
-Method to plot data, based on the sprit.plot_hvsr() function.
-All the same kwargs and default values apply as plot_hvsr(). -For return_fig, returns it to the ‘Plot_Report’ attribute of each HVSRData object
-_description_
-See also
-plot_hvsr
Wrapper of get_report()
-See also
-get_report
Bases: object
HVSRData is the basic data class of the sprit package. -It contains all the processed data, input parameters, and reports.
-These attributes and objects can be accessed using square brackets or the dot accessor. For example, to access the site name, HVSRData[‘site’] and HVSRData.site will both return the site name.
-Some of the methods that work on the HVSRData object (e.g., .plot() and .get_report()) are essentially wrappers for some of the main sprit package functions (sprit.plot_hvsr() and sprit.get_report(), respectively)
-batch
Whether this HVSRData object is part of an HVSRBatch object.
-datastream
A copy of the original obspy datastream read in.
-params
Dictionary containing the parameters used to process the data
-ppsds
Dictionary copy of the class object obspy.signal.spectral_estimation.PPSD().
-ppsds_obspy
The original ppsd information from the obspy.signal.spectral_estimation.PPSD(), so as to keep original if copy is manipulated/changed.
-Methods
-
|
-Make a copy of the HVSRData object. |
-
|
-Method to export HVSRData objects to .hvsr pickle files. |
-
|
-Method to export settings from HVSRData object. |
-
|
-Method to get report from processed data, in print, graphical, or tabular format. |
-
|
-Method to return the "items" of the HVSRData object. |
-
|
-Method to return the "keys" of the HVSRData object. |
-
|
-Method to plot data, wrapper of sprit.plot_hvsr() |
-
|
-Wrapper of get_report() |
-
Whether this HVSRData object is part of an HVSRBatch object. This is used throughout the code to help direct the object into the proper processing pipeline.
-True if HVSRData object is part of HVSRBatch object, otherwise, False
-Make a copy of the HVSRData object. Uses python copy module.
-Based on input, creates either a shallow or deep copy of the HVSRData object. Shallow is equivalent of copy.copy(). Input of type=’deep’ is equivalent of copy.deepcopy() (still experimental). Defaults to shallow.
-A copy of the original obspy datastream read in. This helps to retain the original data even after processing is carried out.
-Obspy stream
-Method to export HVSRData objects to .hvsr pickle files.
-Filepath to save file. Can be either directory (which will assign a filename based on the HVSRData attributes). -By default True. -If True, it will first try to save each file to the same directory as input_data, then if that does not work, to the current working directory, then to the user’s home directory, by default True
-The extension to use for the output, by default ‘hvsr’. This is still a pickle file that can be read with pickle.load(), but will have .hvsr extension.
-Method to export settings from HVSRData object. Simply calls sprit.export_settings() from the HVSRData object. See sprit.export_settings() for more details.
-Filepath to output file. If left as ‘default’, will save as the default value in the resources directory. If that is not possible, will save to home directory, by default ‘default’
-They type of settings to save, by default ‘all’
-Whether to include the location information in the instrument settings, if that settings type is selected, by default False
-Whether to print output (filepath and settings) to terminal, by default True
-Method to get report from processed data, in print, graphical, or tabular format.
-May return nothing, pandas.Dataframe, or pyplot Figure, depending on input.
-See also
-get_report
Method to return the “items” of the HVSRData object. For HVSRData objects, this is a dict_items object with the keys and values in tuples. Functions similar to dict.items().
-A dict_items object of the HVSRData objects attributes, parameters, etc.
-Method to return the “keys” of the HVSRData object. For HVSRData objects, these are the attributes and parameters of the object. Functions similar to dict.keys().
-A dict_keys object of the HVSRData objects attributes, parameters, etc.
-Dictionary containing the parameters used to process the data
-Dictionary containing the process parameters
-Method to plot data, wrapper of sprit.plot_hvsr()
-See also
- -Dictionary copy of the class object obspy.signal.spectral_estimation.PPSD(). The dictionary copy allows manipulation of the data in PPSD, whereas that data cannot be easily manipulated in the original Obspy object.
-Dictionary copy of the PPSD information from generate_ppsds()
-The original ppsd information from the obspy.signal.spectral_estimation.PPSD(), so as to keep original if copy is manipulated/changed.
-Wrapper of get_report()
-See also
-get_report
Function to read data in data as a batch of multiple data files. This is best used through sprit.fetch_data(*args, source=’batch’, **other_kwargs).
-Input data information for how to read in data as batch. Can be filepath or list of filepaths/stream objects. -If filepath, should point to .csv (or similar that can be read by pandas.read_csv()) with batch data information.
-Type of batch read, only ‘table’ and ‘filelist’ accepted. -If ‘table’, will read data from a file read in using pandas.read_csv(), by default ‘table’
-Name of parameter column from batch information file. Only used if a batch_type=’table’ and single parameter column is used, rather than one column per parameter (for single parameter column, parameters are formatted with = between keys/values and , between item pairs), by default None
-Parameters to be used if batch_type=’filelist’. If it is a list, needs to be the same length as batch_data. If it is a dict, will be applied to all files in batch_data and will combined with extra keyword arguments caught by **readcsv_getMeta_fetch_kwargs.
-Whether to print information to terminal during batch read, by default False
-Keyword arguments that will be read into pandas.read_csv(), sprit.input_params, sprit.get_metadata(), and/or sprit.fetch_data()
-HVSRBatch object with each item representing a different HVSRData object
-_description_
-Function to calculate azimuthal horizontal component at specified angle(s). Adds each new horizontal component as a radial component to obspy.Stream object at hvsr_data[‘stream’]
-Input HVSR data
-If azimuth_type=’multiple’, this is the angular step (in unit azimuth_unit) of each of the azimuthal measurements. -If azimuth_type=’single’ this is the angle (in unit azimuth_unit) of the single calculated azimuthal measruement. By default 10.
-What type of azimuthal measurement to make, by default ‘multiple’. -If ‘multiple’ (or {‘multi’, ‘mult’, ‘m’}), will take a measurement at each angular step of azimuth_angle of unit azimuth_unit. -If ‘single’ (or {‘sing’, ‘s’}), will take a single azimuthal measurement at angle specified in azimuth_angle.
-Angular unit used to specify azimuth_angle parameter. By default ‘degrees’. -If ‘degrees’ (or {‘deg’, ‘d’}), will use degrees. -If ‘radians’ (or {‘rad’, ‘r’}), will use radians.
-Whether to show azimuthal plot, by default False.
-Whether to print terminal output, by default False
-Updated HVSRData object specified in hvsr_data with hvsr_data[‘stream’] attribute containing additional components (EHR-*), -with * being zero-padded (3 digits) azimuth angle in degrees.
-The default Windows terminal is not able to display the check mark character correctly. -This function returns another displayable character if platform is Windows
-Function to run tests on HVSR peaks to find best one and see if it passes quality checks
-Dictionary containing all the calculated information about the HVSR data (i.e., hvsr_out returned from process_hvsr)
-2-item tuple or list with lower and upper limit of frequencies to analyze
-How to select the “best” peak used in the analysis. For peak_selection=”max” (default value), the highest peak within peak_freq_range is used. -For peak_selection=’scored’, an algorithm is used to select the peak based in part on which peak passes the most SESAME criteria. -If a numeric value is used (e.g., int or float), this should be a frequency value to manually select as the peak of interest.
-The frequency range within which to check for peaks. If there is an HVSR curve with multiple peaks, this allows the full range of data to be processed while limiting peak picks to likely range.
-Whether to print results and inputs to terminal.
-Object containing previous input data, plus information about peak tests
-Check time steps of PPSDS to make sure they are all the same length
-Check x_values of PPSDS to make sure they are all the same length
-Support function to check if a filepath is a pathlib.Path object and tries to convert if not
-Filepath to check. If not a valid filepath, will not convert and raises error
-pathlib.Path of filepath
-Export data into pickle format that can be read back in using import_data() so data does not need to be processed each time. -Default extension is .hvsr but it is still a pickled file that can be read in using pickle.load().
-Data to be exported
-String or filepath object to be read by pathlib.Path() and/or a with open(hvsr_export_path, ‘wb’) statement. If None, defaults to input input_data directory, by default None
-Filepath extension to use for data file, by default ‘hvsr’
-Save settings to json file
-Where to save the json file(s) containing the settings, by default ‘default’. -If “default,” will save to sprit package resources. Otherwise, set a filepath location you would like for it to be saved to. -If ‘all’ is selected, a directory should be supplied. -Otherwise, it will save in the directory of the provided file, if it exists. Otherwise, defaults to the home directory.
-What kind of settings to save. -If ‘all’, saves all possible types in their respective json files. -If ‘instrument’, save the instrument settings to their respective file. -If ‘processing’, saves the processing settings to their respective file. By default ‘all’
-Whether to include the location parametersin the exported settings document.This includes xcoord, ycoord, elevation, elev_unit, and input_crs
-Whether to print outputs and information to the terminal
-Fetch ambient seismic data from a source to read into obspy stream
-Parameters defined using input_params() function.
-‘raw’ finds raspberry shake data, from raw output copied using scp directly from Raspberry Shake, either in folder or subfolders; -‘dir’ is used if the day’s 3 component files (currently Raspberry Shake supported only) are all 3 contained in a directory by themselves. -‘file’ is used if the params[‘input_data’] specified in input_params() is the direct filepath to a single file to be read directly into an obspy stream. -‘batch’ is used to read a list or specified set of seismic files.
---Most commonly, a csv file can be read in with all the parameters. Each row in the csv is a separate file. Columns can be arranged by parameter.
-
If None (or False), data is not trimmed in this function. -Otherwise, this is the directory to save trimmed and exported data.
-If data_export_path is not None, this is the format in which to save the data
-If False, data is not detrended. -Otherwise, this should be a string accepted by the type parameter of the obspy.core.trace.Trace.detrend method: https://docs.obspy.org/packages/autogen/obspy.core.trace.Trace.detrend.html
-If detrend parameter is ‘spline’ or ‘polynomial’, this is passed directly to the order parameter of obspy.core.trace.Trace.detrend method.
-Whether to update the metadata file, used primarily with Raspberry Shake data which uses a generic inventory file.
-Whether to plot the raw input stream. This plot includes a spectrogram (Z component) and the raw (with decimation for speed) plots of each component signal.
-Which plotting library/engine to use for plotting the Input stream. Options are ‘matplotlib’, ‘plotly’, or ‘obspy’ (not case sensitive).
-Whether to print outputs and inputs to the terminal
-Keywords arguments, primarily for ‘batch’ and ‘dir’ sources
-Same as params parameter, but with an additional “stream” attribute with an obspy data stream with 3 traces: Z (vertical), N (North-south), and E (East-west)
-Private function to format time, used in other functions
-Formats input time to datetime objects in utc
-Input datetime. Can include date and time, just date (time inferred to be 00:00:00.00) or just time (if so, date is set as today)
-If string and not utc, assumed to be timezone of computer running the process. -If int, assumed to be offset from UTC (e.g., CST in the United States is -6; CDT in the United States is -5)
-Output datetime.datetime object, now in UTC time.
-Generates PPSDs for each channel
-Channels need to be in Z, N, E order -Info on PPSD creation here: https://docs.obspy.org/packages/autogen/obspy.signal.spectral_estimation.PPSD.html
-Data object containing all the parameters and other data of interest (stream and paz, for example)
-Whether to generate PPSDs for azimuthal data
-Whether to print inputs and results to terminal
-Dictionary with keyword arguments that are passed directly to obspy.signal.PPSD. -If the following keywords are not specified, their defaults are amended in this function from the obspy defaults for its PPSD function. Specifically:
----
-- -
ppsd_length defaults to 30 (seconds) here instead of 3600
- -
skip_on_gaps defaults to True instead of False
- -
period_step_octaves defaults to 0.03125 instead of 0.125
Dictionary containing entries with ppsds for each channel
-Get metadata and calculate or get paz parameter needed for PPSD
-Ouput from input_params() function
-If not specified, does not write file
-Whether to update the metadata file itself, or just read as-is. If using provided raspberry shake metadata file, select True.
-This passes the source variable value to _read_RS_metadata. It is expected that this is passed directly from the source parameter of sprit.fetch_data()
-Modified input dictionary with additional key:value pair containing paz dictionary (key = “paz”)
-Generate and/or print and/or export a report of the HVSR analysis in a variety of formats.
-Formats include: -* ‘print’: A (monospace) text summary of the HVSR results -* ‘table’: A pandas.DataFrame summary of the HVSR Results.
---This is useful for copy/pasting directly into a larger worksheet.
-
‘plot’: A plot summary of the HVSR results, generated using the plot_hvsr() function.
‘html’: An HTML document/text of the HVSR results. This includes the table, print, and plot reports in one document.
The PDF report is simply the HTML report saved to an A4-sized PDF document.
-Dictionary containing all the information about the processed hvsr data
-Format in which to print or export the report. -The following report_formats return the following items in the following attributes:
----
-- -
‘plot’: hvsr_results[‘Print_Report’] as a str
- -
‘print’: hvsr_results[‘HV_Plot’] - matplotlib.Figure object
- -
-
-- ‘table’: hvsr_results[‘Table_Report’]- pandas.DataFrame object
- -
-
-- -
list/tuple - a list or tuple of the above objects, in the same order they are in the report_formats list
- -
‘html’: hvsr_results[‘HTML_Report’] - a string containing the text for an HTML document
- -
‘pdf’: currently does not save to the HVSRData object itself, can only be saved to the disk directly
What type of plot to plot, if ‘plot’ part of report_formats input
-Which azimuth to plot, by default “HV” which is the main “azimuth” combining the E and N components
-How to handle table report outputs if the designated csv output file already exists. By default, appends the new information to the end of the existing file.
-If True, only reads output to appropriate attribute of data class (ie, print does not print, only reads text into variable). If False, performs as normal.
-A string or list of strings indicating which report formats should be exported to disk.
-If None or False, does not export; if True, will export to same directory as the input_data parameter in the input_params() function. -Otherwise, it should be a string or path object indicating where to export results. May be a file or directory. -If a directory is specified, the filename will be “<site_name>_<acq_date>_<UTC start time>-<UTC end time>”. -The extension/suffix defaults to png for report_formats=”plot”, csv for ‘table’, txt for ‘print’, html for ‘html’, and pdf for ‘pdf.’
-Whether to print the results to terminal. This is the same output as report_formats=’print’, and will not repeat if that is already selected
-Function to open a graphical user interface (gui)
-What type of gui to open. “default” opens regular windowed interface, -“widget” opens jupyter widget’ -“lite” open lite (pending update), by default ‘default’
-Function to import .hvsr (or other extension) data exported using export_data() function
-Filepath of file created using export_data() function. This is usually a pickle file with a .hvsr extension
-Type of format data is in. Currently, only ‘pickle’ supported. Eventually, json or other type may be supported, by default ‘pickle’.
-Function for designating input parameters for reading in and processing data
-Filepath of data. This can be a directory or file, but will need to match with what is chosen later as the source parameter in fetch_data()
-Site name as designated by user for ease of reference. Used for plotting titles, filenames, etc.
-A prefix that may be used to create unique identifiers for each site. -The identifier created is saved as the [‘HVSR_ID’] attribute of the HVSRData object, -and is equivalent to the following formatted string: -f”{id_prefix}-{acq_date.strftime(“%Y%m%d”)}-{starttime.strftime(“%H%M”)}-{station}”.
-The network designation of the seismometer. This is necessary for data from Raspberry Shakes. ‘AM’ is for Amateur network, which fits Raspberry Shakes.
-The station name of the seismometer. This is necessary for data from Raspberry Shakes.
-Location information of the seismometer.
-The three channels used in this analysis, as a list of strings. Preferred that Z component is first, but not necessary
-If string, preferred format is ‘YYYY-MM-DD’. -If int, this will be interpreted as the day of year of the current year (e.g., 33 would be Feb 2 of the current year) -If date or datetime object, this will be the date. Make sure to account for time change when converting to UTC (if UTC is the following day, use the UTC day).
-Start time of data stream. This is necessary for Raspberry Shake data in ‘raw’ form, or for trimming data. Format can be either ‘HH:MM:SS.micros’ or ‘HH:MM’ at minimum.
-End time of data stream. This is necessary for Raspberry Shake data in ‘raw’ form, or for trimming data. Same format as starttime.
-Timezone of input data. If string, ‘UTC’ will use the time as input directly. Any other string value needs to be a TZ identifier in the IANA database, a wikipedia page of these is available here: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. -If int, should be the int value of the UTC offset (e.g., for American Eastern Standard Time: -5). -This is necessary for Raspberry Shake data in ‘raw’ format.
-Longitude (or easting, or, generally, X coordinate) of data point, in Coordinate Reference System (CRS) designated by input_crs. Currently only used in table output, but will likely be used in future for mapping/profile purposes.
-Latitude (or northing, or, generally, Y coordinate) of data point, in Coordinate Reference System (CRS) designated by input_crs. Currently only used in table output, but will likely be used in future for mapping/profile purposes.
-Coordinate reference system of input data, as used by pyproj.CRS.from_user_input()
-Coordinate reference system to which input data will be transformed, as used by pyproj.CRS.from_user_input()
-Surface elevation of data point. Not currently used (except in table output), but will likely be used in the future.
-Depth of seismometer. Not currently used, but will likely be used in the future.
-Instrument from which the data was acquired.
-Filepath of metadata, in format supported by obspy.read_inventory. If default value of None, will read from resources folder of repository (only supported for Raspberry Shake).
-Two-element list containing low and high “corner” frequencies (in Hz) for processing. This can be specified again later.
-Two-element list or tuple containing low and high frequencies (in Hz) that are used to check for HVSR Peaks. This can be a tighter range than hvsr_band, but if larger, it will still only use the hvsr_band range.
-If filepath, should point to a .proc json file with processing parameters (i.e., an output from sprit.export_settings()). -Note that this only applies to parameters for the functions: ‘fetch_data’, ‘remove_noise’, ‘generate_ppsds’, ‘process_hvsr’, ‘check_peaks’, and ‘get_report.’ -If dictionary, dictionary containing nested dictionaries of function names as the key, and the parameter names/values as key/value pairs for each key. -If a function name is not present, or if a parameter name is not present, default values will be used. -For example:
---{ ‘fetch_data’ : {‘source’:’batch’, ‘data_export_path’:”/path/to/trimmed/data”, ‘data_export_format’:’mseed’, ‘detrend’:’spline’, ‘plot_input_stream’:True, ‘verbose’:False, kwargs:{‘kwargskey’:’kwargsvalue’}} }
-
Whether to print output and results to terminal
-sprit.HVSRData class containing input parameters, including data file path and metadata path. This will be used as an input to other functions. If batch processing, params will be converted to batch type in fetch_data() step.
-Function to plot azimuths when azimuths are calculated
-HVSRData that has gone through at least the sprit.fetch_data() step, and before sprit.generate_ppsds()
-Whether to display the peak value at each azimuth calculated on the chart, by default False
-Whether to interpolate the azimuth data to get a smoother plot. -This is just for visualization, does not change underlying data. -It takes a lot of time to process the data, but interpolation for vizualization can happen fairly fast. By default True.
-Whether to display the grid on the chart, by default False
-Figure and axis of resulting azimuth plot
-Function to plot HVSR data
-Dictionary containing output from process_hvsr function
-The plot_type of plot(s) to plot. If list, will plot all plots listed -- ‘HVSR’ - Standard HVSR plot, including standard deviation. Options are included below:
----
-- -
‘p’ shows a vertical dotted line at frequency of the “best” peak
- -
‘ann’ annotates the frequency value of the “best” peak
- -
‘all’ shows all the peaks identified in check_peaks() (by default, only the max is identified)
- -
‘t’ shows the H/V curve for all time windows
- -
‘tp’ shows all the peaks from the H/V curves of all the time windows
- -
‘fr’ shows the window within which SpRIT will search for peak frequencies, as set by peak_freq_range
- -
-
-- ‘test’ shows a visualization of the results of the peak validity test(s). Examples:
- -
-
-- -
‘tests’ visualizes the results of all the peak tests (not the curve tests)
- -
-
-- ‘test12’ shows the results of tests 1 and 2.
- -
-
-- -
Append any number 1-6 after ‘test’ to show a specific test result visualized
‘+’ (as a suffix in ‘C+’ or ‘COMP+’) plots C on a plot separate from HVSR (C+ is default, but without + will plot on the same plot as HVSR)
‘p’ shows a vertical dotted line at frequency of the “best” peak
‘ann’ annotates the frequency value of the “best” peak
‘all’ shows all the peaks identified in check_peaks() (by default, only the max is identified)
‘t’ shows the H/V curve for all time windows
‘p’ shows a horizontal dotted line at the frequency of the “best” peak
‘ann’ annotates the frequency value of the “best” peak
‘all’ shows all the peaks identified in check_peaks()
‘tp’ shows all the peaks of the H/V curve at all time windows
‘p’ shows a point at each calculated (not interpolated) azimuth peak
‘g’ shows grid lines at various angles
This is the default, so usually ‘i’ is not needed.
-‘-i’ prohibits interpolation (only shows the calculated azimuths, as determined by azimuth_angle (default = 30))
What ‘azimuth’ to plot, default being standard N E components combined
-Whether to output the plots as subplots (True) or as separate plots (False)
-If not None, matplotlib figure on which plot is plotted
-If not None, matplotlib axis on which plot is plotted
-Whether to return figure and axis objects
-Which engine to use for plotting. Both “matplotlib” and “plotly” are acceptable. For shorthand, ‘mpl’, ‘m’ also work for matplotlib; ‘plty’ or ‘p’ also work for plotly. Not case sensitive.
-Directory in which to save figures
-Suffix to add to end of figure filename(s), if save_dir is used
-Whether to show legend in plot
-Whether to show plot
-Whether to close figures before plotting
-Whether to clear figures before plotting
-Keyword arguments for matplotlib.pyplot
-Returns figure and axis matplotlib.pyplot objects if return_fig=True, otherwise, simply plots the figures
-Process the input data and get HVSR data
-This is the main function that uses other (private) functions to do -the bulk of processing of the HVSR data and the data quality checks.
-Data object containing all the parameters input and generated by the user (usually, during sprit.input_params(), sprit.fetch_data(), sprit.generate_ppsds() and/or sprit.remove_noise()).
-(not used)
‘Diffuse field assumption’ H = √( (eie_E + eie_N) / eie_Z), eie = equal interval energy
‘Arithmetic Mean’ H ≡ (HN + HE)/2
‘Geometric Mean’ H ≡ √(HN · HE), recommended by the SESAME project (2004)
‘Vector Summation’ H ≡ √(HN^2 + HE^2)
‘Quadratic Mean’ H ≡ √((HN^2 + HE^2)/2)
‘Maximum Horizontal Value’ H ≡ max {HN, HE}
‘Minimum Horizontal Value’ H ≡ min {HN, HE}
‘Single Azimuth’ H = H2·cos(az) + H1·sin(az)
If True, defaults to smoothing the H/V curve using a Savitzky-Golay filter with a window length of 51 (works well with default resample of 1000 pts) -If int, the length of the window in the Savitzky-Golay filter.
-The Konno & Ohmachi method uses the obspy.signal.konnoohmachismoothing.konno_ohmachi_smoothing() function: https://docs.obspy.org/packages/autogen/obspy.signal.konnoohmachismoothing.konno_ohmachi_smoothing.html
The constant method uses a window of constant length f_smooth_width
The proportional method uses a window the percentage length of the frequency steps/range (f_smooth_width now refers to percentage)
See here for more information: https://www.geopsy.org/documentation/geopsy/hv-processing.html
-For ‘konno ohmachi’: passed directly to the bandwidth parameter of the konno_ohmachi_smoothing() function, determines the width of the smoothing peak, with lower values resulting in broader peak. Must be > 0.
For ‘constant’: the size of a triangular smoothing window in the number of frequency steps
For ‘proportional’: the size of a triangular smoothing window in percentage of the number of frequency steps (e.g., if 1000 frequency steps/bins and f_smooth_width=40, window would be 400 steps wide)
If True, default to resample H/V data to include 1000 frequency values for the rest of the analysis -If int, the number of data points to interpolate/resample/smooth the component psd/HV curve data to.
-If False, outlier curve removal is not carried out here. -If True, defaults to 98 (98th percentile). -Otherwise, float of percentile used as rmse_thresh of remove_outlier_curve().
-The azimuth angle to use when method is single azimuth.
-Whether to print output to terminal
-Dictionary containing all the information about the data, including input parameters
-Function to read data from tromino. Specifically, this has been lightly tested on Tromino 3G+ machines
-The input parameter _datapath_ from sprit.input_params()
-The parameters as read in from input_params() and fetch_data()
-Whether to print results to terminal, by default False
-An obspy.Stream object containing the trace data from the Tromino instrument
-Function to remove noisy windows from data, using various methods.
-Methods include -- Manual window selection (by clicking on a chart with spectrogram and stream data), -- Auto window selection, which does the following two in sequence (these can also be done indepently):
----
-- -
A sta/lta “antitrigger” method (using stalta values to automatically remove triggered windows where there appears to be too much noise)
- -
A noise threshold method, that cuts off all times where the noise threshold equals more than (by default) 80% of the highest amplitude noise sample for the length specified by lta (in seconds)
- -
A saturation threshold method, that cuts off all times where the noise threshold equals more than (by default) 99.5% of the highest amplitude noise sample.
Dictionary containing all the data and parameters for the HVSR analysis
-The different methods for removing noise from the dataset. A list of strings will also work, in which case, it should be a list of the above strings. See descriptions above for what how each method works. By default ‘auto.’ -If remove_method=’auto’, this is the equivalent of remove_method=[‘noise threshold’, ‘antitrigger’, ‘saturation threshold’, ‘warm_cool’]
-A list/tuple of two items [s, e] or a list/tuple of two-item lists/tuples [[s0, e0], [s1,e1],…[sn, en]] with start and end time(s) for windows to keep for processing. -Data outside of these times will be excluded from processing. -Times should be obspy.UTCDateTime objects to ensure precision, but time strings (“13:05”) will also work in most cases (exceptions may be when the data stream starts/ends on different UTC days)
-Percentage (between 0 and 1), to use as the threshold at which to remove data. This is used in the saturation method. By default 0.995. -If a value is passed that is greater than 1, it will be divided by 100 to obtain the percentage.
-Percentage (between 0 and 1), to use as the threshold at which to remove data, if it persists for longer than time (in seconds (specified by min_win_size)). This is used in the noise threshold method. By default 0.8. -If a value is passed that is greater than 1, it will be divided by 100 to obtain the percentage.
-Short term average (STA) window (in seconds), by default 2. For use with sta/lta antitrigger method.
-Long term average (LTA) window (in seconds), by default 30. For use with sta/lta antitrigger method.
-Two-item list or tuple with the thresholds for the stalta antitrigger. The first value (index [0]) is the lower threshold, the second value (index [1] is the upper threshold), by default [0.5,5]
-Time in seconds to allow for warmup of the instrument (or while operator is still near instrument). This will remove any data before this time, by default 0.
-Time in seconds to allow for cooldown of the instrument (or for when operator is nearing instrument). This will remove any data within this amount of time at the end of the record, by default 0.
-The minimum size a window must be over specified threshold (in seconds) for it to be removed
-If remove_raw_noise=True, will perform operation on raw data (‘input_stream’), rather than potentially already-modified data (‘stream’).
-Whether to print status of remove_noise
-Dictionary similar to hvsr_data, but containing modified data with ‘noise’ removed
-Function used to remove outliers curves using Root Mean Square Error to calculate the error of each windowed -Probabilistic Power Spectral Density (PPSD) curve against the median PPSD value at each frequency step for all times. -It calculates the RMSE for the PPSD curves of each component individually. All curves are removed from analysis.
-Some aberrant curves often occur due to the remove_noise() function, so this should be run some time after remove_noise(). -In general, the recommended workflow is to run this immediately following the generate_ppsds() function.
-Input dictionary containing all the values and parameters of interest
-The Root Mean Square Error value to use as a threshold for determining whether a curve is an outlier. -This averages over each individual entire curve so that curves with very aberrant data (often occurs when using the remove_noise() method), can be identified. -Otherwise, specify a float or integer to use as the cutoff RMSE value (all curves with RMSE above will be removed)
-Whether rmse_thresh should be interpreted as a raw RMSE value or as a percentile of the RMSE values.
-Whether to use the calculated HV Curve or the individual components. This can only be True after process_hvsr() has been run.
-Whether to show a plot of the removed data
-Whether to print output of function to terminal
-Input dictionary with values modified based on work of function.
-The sprit.run() is the main function that allows you to do all your HVSR processing in one simple step (sprit.run() is how you would call it in your code, but it may also be called using sprit.sprit_hvsr.run())
-The input_data parameter of sprit.run() is the only required parameter. This can be either a single file, a list of files (one for each component, for example), a directory (in which case, all obspy-readable files will be added to an HVSRBatch instance), a Rasp. Shake raw data directory, or sample data.
-Filepath to data to be processed. This may be a file or directory, depending on what kind of data is being processed (this can be specified with the source parameter). -For sample data, The following can be specified as the input_data parameter:
----
-- -
Any integer 1-6 (inclusive), or the string (e.g., input_data=”1” or input_data=1 will work)
- -
The word “sample” before any integer (e.g., input_data=”sample1”)
- -
The word “sample” will default to “sample1” if source=’file’.
- -
If source=’batch’, input_data should be input_data=’sample’ or input_data=’batch’. In this case, it will read and process all the sample files using the HVSRBatch class. Set verbose=True to see all the information in the sample batch csv file.
_description_, by default ‘file’
-Whether to perform azimuthal analysis, by default False.
-Whether to remove noise (before processing PPSDs)
-Whether to remove outlier curves from HVSR time windows
-Whether to show plots. This does not affect whether the plots are created (and then inserted as an attribute of HVSRData), only whether they are shown.
-_description_, by default False
-Keyword arguments for the functions listed above. The keyword arguments are unique, so they will get parsed out and passed into the appropriate function.
-Function for designating input parameters for reading in and processing data -See API documentation: [input_params()](https://sprit.readthedocs.io/en/latest/sprit.html#sprit.input_params)
-See API documentation at link above or at help(sprit.input_params) for specifics.
-See API documentation at link above or at help(sprit.input_params) for specifics.
-See API documentation at link above or at help(input_params) for specifics.
-See API documentation at link above or at help(sprit.input_params) for specifics.
-See API documentation at link above or at help(sprit.input_params) for specifics.
-See API documentation at link above or at help(sprit.input_params) for specifics.
-See API documentation at link above or at help(input_params) for specifics.
-See API documentation at link above or at help(sprit.input_params) for specifics.
-See API documentation at link above or at help(input_params) for specifics.
-See API documentation at link above or at help(input_params) for specifics.
-See API documentation at link above or at help(sprit.input_params) for specifics.
-See API documentation at link above or at help(input_params) for specifics.
-See API documentation at link above or at help(input_params) for specifics.
-See API documentation at link above or at help(input_params) for specifics.
-See API documentation at link above or at help(input_params) for specifics.
-See API documentation at link above or at help(input_params) for specifics.
-See API documentation at link above or at help(sprit.input_params) for specifics.
-See API documentation at link above or at help(input_params) for specifics.
-See API documentation at link above or at help(sprit.input_params) for specifics.
-See API documentation at link above or at help(input_params) for specifics.
-See API documentation at link above or at help(input_params) for specifics.
-See API documentation at link above or at help(input_params) for specifics.
-See API documentation at link above or at help(input_params) for specifics.
-See API documentation at link above or at help(input_params) for specifics.
-Fetch ambient seismic data from a source to read into obspy stream -See API documentation: [fetch_data()](https://sprit.readthedocs.io/en/latest/sprit.html#sprit.fetch_data)
-See API documentation at link above or at help(sprit.fetch_data) for specifics.
-See API documentation at link above or at help(sprit.fetch_data) for specifics.
-See API documentation at link above or at help(fetch_data) for specifics.
-See API documentation at link above or at help(sprit.fetch_data) for specifics.
-See API documentation at link above or at help(sprit.fetch_data) for specifics.
-See API documentation at link above or at help(fetch_data) for specifics.
-See API documentation at link above or at help(fetch_data) for specifics.
-See API documentation at link above or at help(fetch_data) for specifics.
-See API documentation at link above or at help(sprit.fetch_data) for specifics.
-See API documentation at link above or at help(fetch_data) for specifics.
-See API documentation at link above or at help(fetch_data) for specifics.
-See API documentation at link above or at help(fetch_data) for specifics.
-Function to calculate azimuthal horizontal component at specified angle(s). Adds each new horizontal -See API documentation: [calculate_azimuth()](https://sprit.readthedocs.io/en/latest/sprit.html#sprit.calculate_azimuth)
-See API documentation at link above or at help(sprit.calculate_azimuth) for specifics.
-See API documentation at link above or at help(calculate_azimuth) for specifics.
-See API documentation at link above or at help(sprit.calculate_azimuth) for specifics.
-See API documentation at link above or at help(sprit.calculate_azimuth) for specifics.
-See API documentation at link above or at help(calculate_azimuth) for specifics.
-See API documentation at link above or at help(calculate_azimuth) for specifics.
-See API documentation at link above or at help(calculate_azimuth) for specifics.
-Function to remove noisy windows from data, using various methods. -See API documentation: [remove_noise()](https://sprit.readthedocs.io/en/latest/sprit.html#sprit.remove_noise)
-See API documentation at link above or at help(sprit.remove_noise) for specifics.
-See API documentation at link above or at help(remove_noise) for specifics.
-See API documentation at link above or at help(remove_noise) for specifics.
-See API documentation at link above or at help(remove_noise) for specifics.
-See API documentation at link above or at help(remove_noise) for specifics.
-See API documentation at link above or at help(remove_noise) for specifics.
-See API documentation at link above or at help(remove_noise) for specifics.
-See API documentation at link above or at help(remove_noise) for specifics.
-See API documentation at link above or at help(remove_noise) for specifics.
-See API documentation at link above or at help(remove_noise) for specifics.
-See API documentation at link above or at help(remove_noise) for specifics.
-See API documentation at link above or at help(remove_noise) for specifics.
-See API documentation at link above or at help(remove_noise) for specifics.
-See API documentation at link above or at help(remove_noise) for specifics.
-Generates PPSDs for each channel -See API documentation: [generate_ppsds()](https://sprit.readthedocs.io/en/latest/sprit.html#sprit.generate_ppsds)
-See API documentation at link above or at help(sprit.generate_ppsds) for specifics.
-See API documentation at link above or at help(generate_ppsds) for specifics.
-See API documentation at link above or at help(generate_ppsds) for specifics.
-See API documentation at link above or at help(generate_ppsds) for specifics.
-Process the input data and get HVSR data -See API documentation: [process_hvsr()](https://sprit.readthedocs.io/en/latest/sprit.html#sprit.process_hvsr)
-See API documentation at link above or at help(sprit.process_hvsr) for specifics.
-See API documentation at link above or at help(process_hvsr) for specifics.
-See API documentation at link above or at help(process_hvsr) for specifics.
-See API documentation at link above or at help(sprit.process_hvsr) for specifics.
-See API documentation at link above or at help(process_hvsr) for specifics.
-See API documentation at link above or at help(process_hvsr) for specifics.
-See API documentation at link above or at help(process_hvsr) for specifics.
-See API documentation at link above or at help(process_hvsr) for specifics.
-See API documentation at link above or at help(process_hvsr) for specifics.
-Function used to remove outliers curves using Root Mean Square Error to calculate the error of each -See API documentation: [remove_outlier_curves()](https://sprit.readthedocs.io/en/latest/sprit.html#sprit.remove_outlier_curves)
-See API documentation at link above or at help(sprit.remove_outlier_curves) for specifics.
-See API documentation at link above or at help(remove_outlier_curves) for specifics.
-See API documentation at link above or at help(remove_outlier_curves) for specifics.
-See API documentation at link above or at help(remove_outlier_curves) for specifics.
-See API documentation at link above or at help(sprit.remove_outlier_curves) for specifics.
-See API documentation at link above or at help(remove_outlier_curves) for specifics.
-See API documentation at link above or at help(remove_outlier_curves) for specifics.
-Function to run tests on HVSR peaks to find best one and see if it passes quality checks -See API documentation: [check_peaks()](https://sprit.readthedocs.io/en/latest/sprit.html#sprit.check_peaks)
-See API documentation at link above or at help(sprit.check_peaks) for specifics.
-See API documentation at link above or at help(check_peaks) for specifics.
-See API documentation at link above or at help(sprit.check_peaks) for specifics.
-See API documentation at link above or at help(check_peaks) for specifics.
-See API documentation at link above or at help(sprit.check_peaks) for specifics.
-See API documentation at link above or at help(check_peaks) for specifics.
-Generate and/or print and/or export a report of the HVSR analysis in a variety of formats. -See API documentation: [get_report()](https://sprit.readthedocs.io/en/latest/sprit.html#sprit.get_report)
-See API documentation at link above or at help(sprit.get_report) for specifics.
-See API documentation at link above or at help(get_report) for specifics.
-See API documentation at link above or at help(sprit.get_report) for specifics.
-See API documentation at link above or at help(sprit.get_report) for specifics.
-See API documentation at link above or at help(sprit.get_report) for specifics.
-See API documentation at link above or at help(get_report) for specifics.
-See API documentation at link above or at help(get_report) for specifics.
-See API documentation at link above or at help(get_report) for specifics.
-See API documentation at link above or at help(get_report) for specifics.
-See API documentation at link above or at help(get_report) for specifics.
-See API documentation at link above or at help(get_report) for specifics.
-See API documentation at link above or at help(get_report) for specifics.
-See API documentation at link above or at help(sprit.get_report) for specifics.
-See API documentation at link above or at help(get_report) for specifics.
-See API documentation at link above or at help(get_report) for specifics.
-See API documentation at link above or at help(get_report) for specifics.
-See API documentation at link above or at help(get_report) for specifics.
-Export data into pickle format that can be read back in using import_data() so data does not need to -See API documentation: [export_data()](https://sprit.readthedocs.io/en/latest/sprit.html#sprit.export_data)
-See API documentation at link above or at help(sprit.export_data) for specifics.
-See API documentation at link above or at help(export_data) for specifics.
-See API documentation at link above or at help(sprit.export_data) for specifics.
-See API documentation at link above or at help(export_data) for specifics.
-If a single file/data point is being processed, a HVSRData object will be returned. Otherwise, it will be a HVSRBatch object. See their documention for more information.
-If the input parameter may not be read correctly. This is raised if the input_params() function fails. This raises an error since no other data processing or reading steps will be able to carried out correctly.
-If the data is not read/fetched correctly using fetch_data(), an error will be raised. This is raised if the fetch_data() function fails. This raises an error since no other data processing steps will be able to carried out correctly.
-If the data being processed is a single file, an error will be raised if generate_ppsds() does not work correctly. No errors are raised for remove_noise() errors (since that is an optional step) and the process_hvsr() step (since that is the last processing step) .
-Notes
-The sprit.run() function calls the following functions. This is the recommended order/set of functions to run to process HVSR using SpRIT. See the API documentation for these functions for more information: -- input_params(): The input_data parameter of input_params() is the only required variable, though others may also need to be called for your data to process correctly. -- fetch_data(): the source parameter of fetch_data() is the only explicit variable in the sprit.run() function aside from input_data and verbose. Everything else gets delivered to the correct function via the kwargs dictionary -- remove_noise(): by default, the kind of noise removal is remove_method=’auto’. See the remove_noise() documentation for more information. If remove_method is set to anything other than one of the explicit options in remove_noise, noise removal will not be carried out. -- generate_ppsds(): generates ppsds for each component, which will be combined/used later. Any parameter of obspy.signal.spectral_estimation.PPSD() may also be read into this function. -- remove_outlier_curves(): removes any outlier ppsd curves so that the data quality for when curves are combined will be enhanced. See the remove_outlier_curves() documentation for more information. -- process_hvsr(): this is the main function processing the hvsr curve and statistics. See process_hvsr() documentation for more details. The hvsr_band parameter sets the frequency spectrum over which these calculations occur. -- check_peaks(): this is the main function that will find and ‘score’ peaks to get a best peak. The parameter peak_freq_range can be set to limit the frequencies within which peaks are checked and scored. -- get_report(): this is the main function that will print, plot, and/or save the results of the data. See the get_report() API documentation for more information. -- export_data(): this function exports the final data output as a pickle file (by default, this pickle object has a .hvsr extension). 
This can be used to read data back into SpRIT without having to reprocess data.
-Computes elapsed time since the last call.
-The default Windows terminal is not able to display the check mark character correctly. -This function returns another displayable character if platform is Windows
-HVSRBatch
-HVSRData
-batch_data_read()
calculate_azimuth()
check_instance()
check_peaks()
export_data()
export_report()
export_settings()
fetch_data()
generate_ppsds()
get_metadata()
get_report()
gui()
gui_test()
import_data()
import_settings()
input_params()
plot_azimuth()
plot_hvsr()
plot_stream()
process_hvsr()
read_tromino_files()
remove_noise()
remove_outlier_curves()
run()
test_function()
get_check_peaks_kwargs()
get_generate_ppsd_kwargs()
get_get_report_kwargs()
get_process_hvsr_kwargs()
get_remove_noise_kwargs()
get_remove_outlier_curve_kwargs()
parse_comp_plot_list()
parse_hv_plot_list()
parse_plot_string()
parse_spec_plot_list()
plot_outlier_curves()
plot_preview()
plot_results()
process_data()
read_data()
This module/script is used to run sprit from the command line.
-The arguments here should correspond to any of the keyword arguments that can be used with sprit.run() (or sprit_hvsr.run()). See the run() function’s documentation for more information, or the individual functions that are run within it.
-For list inputs, you should pass the argument multiple times(e.g., –report_format “csv” –report_format “print” –report_format “plot”). (In the case of –report_format, you can also just use “all” to get csv, print, and plot report types)
-The input_data parameter of input_params() is the only required argument, though for your data processing to work correctly and to be formatted correctly, you may need to pass others as well.
- - - - -This module is the main SpRIT module that contains all the functions needed to run HVSR analysis.
-The functions defined here are read both by the SpRIT graphical user interface and by the command-line interface to run HVSR analysis on input data.
-See documentation for individual functions for more information.
-Bases: object
HVSRBatch is the data container used for batch processing. -It contains several HVSRData objects (one for each site). -These can be accessed using their site name, -either square brackets (HVSRBatchVariable[“SiteName”]) or the dot (HVSRBatchVariable.SiteName) accessor.
-The dot accessor may not work if there is a space in the site name.
-All of the functions in the sprit package are designed to perform the bulk of their operations iteratively -on the individual HVSRData objects contained in the HVSRBatch object, and do little with the HVSRBatch object itself, -besides using it determine which sites are contained within it.
-Methods
-
|
-Make a copy of the HVSRBatch object. |
-
|
-Method to export HVSRData objects in HVSRBatch container to individual .hvsr pickle files. |
-
|
-Method to export settings from HVSRData object in HVSRBatch object. |
-
|
-Method to get report from processed data, in print, graphical, or tabular format. |
-
|
-Method to return both the site names and the HVSRData object as a set of dict_items tuples. |
-
|
-Method to return the "keys" of the HVSRBatch object. |
-
|
-Method to plot data, based on the sprit.plot_hvsr() function. |
-
|
-Wrapper of get_report() |
-
Make a copy of the HVSRBatch object. Uses python copy module.
-Based on input, creates either a shallow or deep copy of the HVSRBatch object. Shallow is equivalent of copy.copy(). Input of ‘deep’ is equivalent of copy.deepcopy() (still experimental). Defaults to shallow.
-Method to export HVSRData objects in HVSRBatch container to individual .hvsr pickle files.
-Filepath to save file. Can be either directory (which will assign a filename based on the HVSRData attributes). By default True. If True, it will first try to save each file to the same directory as input_data, then if that does not work, to the current working directory, then to the user’s home directory, by default True
-The extension to use for the output, by default ‘hvsr’. This is still a pickle file that can be read with pickle.load(), but will have .hvsr extension.
-Method to export settings from HVSRData object in HVSRBatch object.
-Simply calls sprit.export_settings() from specified HVSRData object in the HVSRBatch object. -See sprit.export_settings() for more details.
-The name of the site whose settings should be exported. If None, will default to the first site, by default None.
-Filepath to output file. If left as ‘default’, will save as the default value in the resources directory. If that is not possible, will save to home directory, by default ‘default’
-The type of settings to save, by default ‘all’
-Whether to include the location information in the instrument settings, if that settings type is selected, by default False
-Whether to print output (filepath and settings) to terminal, by default True
-See also
- -Method to get report from processed data, in print, graphical, or tabular format.
-May return nothing, pandas.Dataframe, or pyplot Figure, depending on input.
-See also
-get_report
Method to return both the site names and the HVSRData object as a set of dict_items tuples. Functions similar to dict.items().
-_description_
-Method to return the “keys” of the HVSRBatch object. For HVSRBatch objects, these are the site names. Functions similar to dict.keys().
-A dict_keys object listing the site names of each of the HVSRData objects contained in the HVSRBatch object
-Method to plot data, based on the sprit.plot_hvsr() function.
-All the same kwargs and default values apply as plot_hvsr(). -For return_fig, returns it to the ‘Plot_Report’ attribute of each HVSRData object
-_description_
-See also
-plot_hvsr
Wrapper of get_report()
-See also
-get_report
Bases: object
HVSRData is the basic data class of the sprit package. -It contains all the processed data, input parameters, and reports.
-These attributes and objects can be accessed using square brackets or the dot accessor. For example, to access the site name, HVSRData[‘site’] and HVSRData.site will both return the site name.
-Some of the methods that work on the HVSRData object (e.g., .plot() and .get_report()) are essentially wrappers for some of the main sprit package functions (sprit.plot_hvsr() and sprit.get_report(), respectively)
-batch
Whether this HVSRData object is part of an HVSRBatch object.
-datastream
A copy of the original obspy datastream read in.
-params
Dictionary containing the parameters used to process the data
-ppsds
Dictionary copy of the class object obspy.signal.spectral_estimation.PPSD().
-ppsds_obspy
The original ppsd information from the obspy.signal.spectral_estimation.PPSD(), so as to keep original if copy is manipulated/changed.
-Methods
-
|
-Make a copy of the HVSRData object. |
-
|
-Method to export HVSRData objects to .hvsr pickle files. |
-
|
-Method to export settings from HVSRData object. |
-
|
-Method to get report from processed data, in print, graphical, or tabular format. |
-
|
-Method to return the "items" of the HVSRData object. |
-
|
-Method to return the "keys" of the HVSRData object. |
-
|
-Method to plot data, wrapper of sprit.plot_hvsr() |
-
|
-Wrapper of get_report() |
-
Whether this HVSRData object is part of an HVSRBatch object. This is used throughout the code to help direct the object into the proper processing pipeline.
-True if HVSRData object is part of HVSRBatch object, otherwise, False
-Make a copy of the HVSRData object. Uses python copy module.
-Based on input, creates either a shallow or deep copy of the HVSRData object. Shallow is equivalent of copy.copy(). Input of type=’deep’ is equivalent of copy.deepcopy() (still experimental). Defaults to shallow.
-A copy of the original obspy datastream read in. This helps to retain the original data even after processing is carried out.
-Obspy stream
-Method to export HVSRData objects to .hvsr pickle files.
-Filepath to save file. Can be either directory (which will assign a filename based on the HVSRData attributes). -By default True. -If True, it will first try to save each file to the same directory as input_data, then if that does not work, to the current working directory, then to the user’s home directory, by default True
-The extension to use for the output, by default ‘hvsr’. This is still a pickle file that can be read with pickle.load(), but will have .hvsr extension.
-Method to export settings from HVSRData object. Simply calls sprit.export_settings() from the HVSRData object. See sprit.export_settings() for more details.
-Filepath to output file. If left as ‘default’, will save as the default value in the resources directory. If that is not possible, will save to home directory, by default ‘default’
-The type of settings to save, by default ‘all’
-Whether to include the location information in the instrument settings, if that settings type is selected, by default False
-Whether to print output (filepath and settings) to terminal, by default True
-Method to get report from processed data, in print, graphical, or tabular format.
-May return nothing, pandas.Dataframe, or pyplot Figure, depending on input.
-See also
-get_report
Method to return the “items” of the HVSRData object. For HVSRData objects, this is a dict_items object with the keys and values in tuples. Functions similar to dict.items().
-A dict_items object of the HVSRData objects attributes, parameters, etc.
-Method to return the “keys” of the HVSRData object. For HVSRData objects, these are the attributes and parameters of the object. Functions similar to dict.keys().
-A dict_keys object of the HVSRData objects attributes, parameters, etc.
-Dictionary containing the parameters used to process the data
-Dictionary containing the process parameters
-Method to plot data, wrapper of sprit.plot_hvsr()
-See also
- -Dictionary copy of the class object obspy.signal.spectral_estimation.PPSD(). The dictionary copy allows manipulation of the data in PPSD, whereas that data cannot be easily manipulated in the original Obspy object.
-Dictionary copy of the PPSD information from generate_ppsds()
-The original ppsd information from the obspy.signal.spectral_estimation.PPSD(), so as to keep original if copy is manipulated/changed.
-Wrapper of get_report()
-See also
-get_report
Function to read data in data as a batch of multiple data files. This is best used through sprit.fetch_data(*args, source=’batch’, **other_kwargs).
-Input data information for how to read in data as batch. Can be filepath or list of filepaths/stream objects. -If filepath, should point to .csv (or similar that can be read by pandas.read_csv()) with batch data information.
-Type of batch read, only ‘table’ and ‘filelist’ accepted. -If ‘table’, will read data from a file read in using pandas.read_csv(), by default ‘table’
-Name of parameter column from batch information file. Only used if a batch_type=’table’ and single parameter column is used, rather than one column per parameter (for single parameter column, parameters are formatted with = between keys/values and , between item pairs), by default None
-Parameters to be used if batch_type=’filelist’. If it is a list, needs to be the same length as batch_data. If it is a dict, will be applied to all files in batch_data and will combined with extra keyword arguments caught by **readcsv_getMeta_fetch_kwargs.
-Whether to print information to terminal during batch read, by default False
-Keyword arguments that will be read into pandas.read_csv(), sprit.input_params, sprit.get_metadata(), and/or sprit.fetch_data()
-HVSRBatch object with each item representing a different HVSRData object
-_description_
-Function to calculate azimuthal horizontal component at specified angle(s). Adds each new horizontal component as a radial component to obspy.Stream object at hvsr_data[‘stream’]
-Input HVSR data
-If azimuth_type=’multiple’, this is the angular step (in unit azimuth_unit) of each of the azimuthal measurements. -If azimuth_type=’single’ this is the angle (in unit azimuth_unit) of the single calculated azimuthal measurement. By default 10.
-What type of azimuthal measurement to make, by default ‘multiple’. -If ‘multiple’ (or {‘multi’, ‘mult’, ‘m’}), will take a measurement at each angular step of azimuth_angle of unit azimuth_unit. -If ‘single’ (or {‘sing’, ‘s’}), will take a single azimuthal measurement at angle specified in azimuth_angle.
-Angular unit used to specify azimuth_angle parameter. By default ‘degrees’. -If ‘degrees’ (or {‘deg’, ‘d’}), will use degrees. -If ‘radians’ (or {‘rad’, ‘r’}), will use radians.
-Whether to show azimuthal plot, by default False.
-Whether to print terminal output, by default False
-Updated HVSRData object specified in hvsr_data with hvsr_data[‘stream’] attribute containing additional components (EHR-*), -with * being zero-padded (3 digits) azimuth angle in degrees.
-Function to run tests on HVSR peaks to find best one and see if it passes quality checks
-Dictionary containing all the calculated information about the HVSR data (i.e., hvsr_out returned from process_hvsr)
-2-item tuple or list with lower and upper limit of frequencies to analyze
-How to select the “best” peak used in the analysis. For peak_selection=”max” (default value), the highest peak within peak_freq_range is used. -For peak_selection=’scored’, an algorithm is used to select the peak based in part on which peak passes the most SESAME criteria. -If a numeric value is used (e.g., int or float), this should be a frequency value to manually select as the peak of interest.
-The frequency range within which to check for peaks. If there is an HVSR curve with multiple peaks, this allows the full range of data to be processed while limiting peak picks to likely range.
-Whether to print results and inputs to terminal.
-Object containing previous input data, plus information about peak tests
-Export data into pickle format that can be read back in using import_data() so data does not need to be processed each time. -Default extension is .hvsr but it is still a pickled file that can be read in using pickle.load().
-Data to be exported
-String or filepath object to be read by pathlib.Path() and/or a with open(hvsr_export_path, ‘wb’) statement. If None, defaults to input input_data directory, by default None
-Filepath extension to use for data file, by default ‘hvsr’
-Function to export reports to disk. Exportable formats include: -* ‘table’: saves a pandas DataFrame as a csv) -* ‘plot’: saves the matplotlib or plotly plot figure (depending on what is designated via plot_engine) as an image (png by default) -* ‘print’: saves the print report as a .txt file -* ‘html’: saves the html report as a .html file -* ‘pdf’: saves the pdf report as a .pdf file
-HVSRData object containing the HVSR data
-The path to where the report should be exported. -If this is None (default), this is written to the home directory. -If this is a True, uses the same directory as the input data, but generates a filename. -If this is a directory, generates a filename. -If filename is specified and the extension does not match the report type, the extension is adjusted. -Otherwise, this is the output file or , by default None
-If table is the report type, this can prevent overwriting data, by default ‘rename’. -* “rename” (or “keep”): renames the new file to prevent overwrite, appends a digit to the end of filename -* “append”: appends the new data to the existing file -* “overwrite”: overwrites the existing file
-The format (or a list of formats) to export the report, by default [‘pdf’].
-Whether to show the designated reports that were chosen for export, by default True
-Whether to print progress and other information to terminal, by default False
-An HVSRData object that is the same as hvsr_results, but with any additionally generated reports.
-Save settings to json file
-Where to save the json file(s) containing the settings, by default ‘default’. -If “default,” will save to sprit package resources. Otherwise, set a filepath location you would like for it to be saved to. -If ‘all’ is selected, a directory should be supplied. -Otherwise, it will save in the directory of the provided file, if it exists. Otherwise, defaults to the home directory.
-What kind of settings to save. -If ‘all’, saves all possible types in their respective json files. -If ‘instrument’, save the instrument settings to their respective file. -If ‘processing’, saves the processing settings to their respective file. By default ‘all’
-Whether to include the location parameters in the exported settings document. This includes xcoord, ycoord, elevation, elev_unit, and input_crs
-Whether to print outputs and information to the terminal
-Fetch ambient seismic data from a source to read into obspy stream
-Parameters defined using input_params() function.
-‘raw’ finds raspberry shake data, from raw output copied using scp directly from Raspberry Shake, either in folder or subfolders; -‘dir’ is used if the day’s 3 component files (currently Raspberry Shake supported only) are all 3 contained in a directory by themselves. -‘file’ is used if the params[‘input_data’] specified in input_params() is the direct filepath to a single file to be read directly into an obspy stream. -‘batch’ is used to read a list or specified set of seismic files.
---Most commonly, a csv file can be read in with all the parameters. Each row in the csv is a separate file. Columns can be arranged by parameter.
-
If None (or False), data is not trimmed in this function. -Otherwise, this is the directory to save trimmed and exported data.
-If data_export_path is not None, this is the format in which to save the data
-If False, data is not detrended. -Otherwise, this should be a string accepted by the type parameter of the obspy.core.trace.Trace.detrend method: https://docs.obspy.org/packages/autogen/obspy.core.trace.Trace.detrend.html
-If detrend parameter is ‘spline’ or ‘polynomial’, this is passed directly to the order parameter of obspy.core.trace.Trace.detrend method.
-Whether to update the metadata file, used primarily with Raspberry Shake data which uses a generic inventory file.
-Whether to plot the raw input stream. This plot includes a spectrogram (Z component) and the raw (with decimation for speed) plots of each component signal.
-Which plotting library/engine to use for plotting the Input stream. Options are ‘matplotlib’, ‘plotly’, or ‘obspy’ (not case sensitive).
-Whether to print outputs and inputs to the terminal
-Keywords arguments, primarily for ‘batch’ and ‘dir’ sources
-Same as params parameter, but with an additional “stream” attribute with an obspy data stream with 3 traces: Z (vertical), N (North-south), and E (East-west)
-Generates PPSDs for each channel
-Channels need to be in Z, N, E order -Info on PPSD creation here: https://docs.obspy.org/packages/autogen/obspy.signal.spectral_estimation.PPSD.html
-Data object containing all the parameters and other data of interest (stream and paz, for example)
-Whether to generate PPSDs for azimuthal data
-Whether to print inputs and results to terminal
-Dictionary with keyword arguments that are passed directly to obspy.signal.PPSD. -If the following keywords are not specified, their defaults are amended in this function from the obspy defaults for its PPSD function. Specifically:
----
-- -
ppsd_length defaults to 30 (seconds) here instead of 3600
- -
skip_on_gaps defaults to True instead of False
- -
period_step_octaves defaults to 0.03125 instead of 0.125
Dictionary containing entries with ppsds for each channel
-Get metadata and calculate or get paz parameter needed for PPSD
-Ouput from input_params() function
-If not specified, does not write file
-Whether to update the metadata file itself, or just read as-is. If using provided raspberry shake metadata file, select True.
-This passes the source variable value to _read_RS_metadata. It is expected that this is passed directly from the source parameter of sprit.fetch_data()
-Modified input dictionary with additional key:value pair containing paz dictionary (key = “paz”)
-Generate and/or print and/or export a report of the HVSR analysis in a variety of formats.
-Formats include: -* ‘print’: A (monospace) text summary of the HVSR results -* ‘table’: A pandas.DataFrame summary of the HVSR Results.
---This is useful for copy/pasting directly into a larger worksheet.
-
‘plot’: A plot summary of the HVSR results, generated using the plot_hvsr() function.
‘html’: An HTML document/text of the HVSR results. This includes the table, print, and plot reports in one document.
The PDF report is simply the HTML report saved to an A4-sized PDF document.
-Dictionary containing all the information about the processed hvsr data
-Format in which to print or export the report. -The following report_formats return the following items in the following attributes:
----
-- -
‘print’: hvsr_results[‘Print_Report’] as a str
- -
‘plot’: hvsr_results[‘HV_Plot’] - matplotlib.Figure object
- -
-
-- ‘table’: hvsr_results[‘Table_Report’]- pandas.DataFrame object
- -
-
-- -
list/tuple - a list or tuple of the above objects, in the same order they are in the report_formats list
- -
‘html’: hvsr_results[‘HTML_Report’] - a string containing the text for an HTML document
- -
‘pdf’: currently does not save to the HVSRData object itself, can only be saved to the disk directly
What type of plot to plot, if ‘plot’ part of report_formats input
-Which azimuth to plot, by default “HV” which is the main “azimuth” combining the E and N components
-How to handle table report outputs if the designated csv output file already exists. By default, appends the new information to the end of the existing file.
-If True, only reads output to appropriate attribute of data class (ie, print does not print, only reads text into variable). If False, performs as normal.
-A string or list of strings indicating which report formats should be exported to disk.
-If None or False, does not export; if True, will export to same directory as the input_data parameter in the input_params() function. -Otherwise, it should be a string or path object indicating where to export results. May be a file or directory. -If a directory is specified, the filename will be “<site_name>_<acq_date>_<UTC start time>-<UTC end time>”. -The extension/suffix defaults to png for report_formats=”plot”, csv for ‘table’, txt for ‘print’, html for ‘html’, and pdf for ‘pdf.’
-Whether to print the results to terminal. This is the same output as report_formats=’print’, and will not repeat if that is already selected
-Function to open a graphical user interface (gui)
-What type of gui to open. “default” opens regular windowed interface, -“widget” opens jupyter widget’ -“lite” open lite (pending update), by default ‘default’
-Function to import .hvsr (or other extension) data exported using export_data() function
-Filepath of file created using export_data() function. This is usually a pickle file with a .hvsr extension
-Type of format data is in. Currently, only ‘pickle’ supported. Eventually, json or other type may be supported, by default ‘pickle’.
-Function for designating input parameters for reading in and processing data
-Filepath of data. This can be a directory or file, but will need to match with what is chosen later as the source parameter in fetch_data()
-Site name as designated by user for ease of reference. Used for plotting titles, filenames, etc.
-A prefix that may be used to create unique identifiers for each site. -The identifier created is saved as the [‘HVSR_ID’] attribute of the HVSRData object, -and is equivalent to the following formatted string: -f”{id_prefix}-{acq_date.strftime(“%Y%m%d”)}-{starttime.strftime(“%H%M”)}-{station}”.
-The network designation of the seismometer. This is necessary for data from Raspberry Shakes. ‘AM’ is for Amateur network, which fits Raspberry Shakes.
-The station name of the seismometer. This is necessary for data from Raspberry Shakes.
-Location information of the seismometer.
-The three channels used in this analysis, as a list of strings. Preferred that Z component is first, but not necessary
-If string, preferred format is ‘YYYY-MM-DD’. -If int, this will be interpreted as the day of year of current year (e.g., 33 would be Feb 2 of current year) -If date or datetime object, this will be the date. Make sure to account for time change when converting to UTC (if UTC is the following day, use the UTC day).
-Start time of data stream. This is necessary for Raspberry Shake data in ‘raw’ form, or for trimming data. Format can be either ‘HH:MM:SS.micros’ or ‘HH:MM’ at minimum.
-End time of data stream. This is necessary for Raspberry Shake data in ‘raw’ form, or for trimming data. Same format as starttime.
-Timezone of input data. If string, ‘UTC’ will use the time as input directly. Any other string value needs to be a TZ identifier in the IANA database, a wikipedia page of these is available here: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. -If int, should be the int value of the UTC offset (e.g., for American Eastern Standard Time: -5). -This is necessary for Raspberry Shake data in ‘raw’ format.
-Longitude (or easting, or, generally, X coordinate) of data point, in Coordinate Reference System (CRS) designated by input_crs. Currently only used in table output, but will likely be used in future for mapping/profile purposes.
-Latitude (or northing, or, generally, Y coordinate) of data point, in Coordinate Reference System (CRS) designated by input_crs. Currently only used in table output, but will likely be used in future for mapping/profile purposes.
-Coordinate reference system of input data, as used by pyproj.CRS.from_user_input()
-Coordinate reference system to which input data will be transformed, as used by pyproj.CRS.from_user_input()
-Surface elevation of data point. Not currently used (except in table output), but will likely be used in the future.
-Depth of seismometer. Not currently used, but will likely be used in the future.
-Instrument from which the data was acquired.
-Filepath of metadata, in format supported by obspy.read_inventory. If default value of None, will read from resources folder of repository (only supported for Raspberry Shake).
-Two-element list containing low and high “corner” frequencies (in Hz) for processing. This can specified again later.
-Two-element list or tuple containing low and high frequencies (in Hz) that are used to check for HVSR Peaks. This can be a tighter range than hvsr_band, but if larger, it will still only use the hvsr_band range.
-If filepath, should point to a .proc json file with processing parameters (i.e, an output from sprit.export_settings()). -Note that this only applies to parameters for the functions: ‘fetch_data’, ‘remove_noise’, ‘generate_ppsds’, ‘process_hvsr’, ‘check_peaks’, and ‘get_report.’ -If dictionary, dictionary containing nested dictionaries of function names as they key, and the parameter names/values as key/value pairs for each key. -If a function name is not present, or if a parameter name is not present, default values will be used. -For example:
---{ ‘fetch_data’ : {‘source’:’batch’, ‘data_export_path’:”/path/to/trimmed/data”, ‘data_export_format’:’mseed’, ‘detrend’:’spline’, ‘plot_input_stream’:True, ‘verbose’:False, kwargs:{‘kwargskey’:’kwargsvalue’}} }
-
Whether to print output and results to terminal
-sprit.HVSRData class containing input parameters, including data file path and metadata path. This will be used as an input to other functions. If batch processing, params will be converted to batch type in fetch_data() step.
-Function to plot azimuths when azimuths are calculated
-HVSRData that has gone through at least the sprit.fetch_data() step, and before sprit.generate_ppsds()
-Whether to display the peak value at each azimuth calculated on the chart, by default False
-Whether to interpolate the azimuth data to get a smoother plot. -This is just for visualization, does not change underlying data. -It takes a lot of time to process the data, but interpolation for visualization can happen fairly fast. By default True.
-Whether to display the grid on the chart, by default False
-Figure and axis of resulting azimuth plot
-Function to plot HVSR data
-Dictionary containing output from process_hvsr function
-The plot_type of plot(s) to plot. If list, will plot all plots listed -- ‘HVSR’ - Standard HVSR plot, including standard deviation. Options are included below:
----
-- -
‘p’ shows a vertical dotted line at frequency of the “best” peak
- -
‘ann’ annotates the frequency value of the “best” peak
- -
‘all’ shows all the peaks identified in check_peaks() (by default, only the max is identified)
- -
‘t’ shows the H/V curve for all time windows
- -
‘tp’ shows all the peaks from the H/V curves of all the time windows
- -
‘fr’ shows the window within which SpRIT will search for peak frequencies, as set by peak_freq_range
- -
-
-- ‘test’ shows a visualization of the results of the peak validity test(s). Examples:
- -
-
-- -
‘tests’ visualizes the results of all the peak tests (not the curve tests)
- -
-
-- ‘test12’ shows the results of tests 1 and 2.
- -
-
-- -
Append any number 1-6 after ‘test’ to show a specific test result visualized
‘+’ (as a suffix in ‘C+’ or ‘COMP+’) plots C on a plot separate from HVSR (C+ is default, but without + will plot on the same plot as HVSR)
‘p’ shows a vertical dotted line at frequency of the “best” peak
‘ann’ annotates the frequency value of the “best” peak
‘all’ shows all the peaks identified in check_peaks() (by default, only the max is identified)
‘t’ shows the H/V curve for all time windows
‘p’ shows a horizontal dotted line at the frequency of the “best” peak
‘ann’ annotates the frequency value of the “best” peak
‘all’ shows all the peaks identified in check_peaks()
‘tp’ shows all the peaks of the H/V curve at all time windows
‘p’ shows a point at each calculated (not interpolated) azimuth peak
‘g’ shows grid lines at various angles
This is the default, so usually ‘i’ is not needed.
-‘-i’ prohibits interpolation (only shows the calculated azimuths, as determined by azimuth_angle (default = 30))
What ‘azimuth’ to plot, default being standard N E components combined
-Whether to output the plots as subplots (True) or as separate plots (False)
-If not None, matplotlib figure on which plot is plotted
-If not None, matplotlib axis on which plot is plotted
-Whether to return figure and axis objects
-Which engine to use for plotting. Both “matplotlib” and “plotly” are acceptable. For shorthand, ‘mpl’, ‘m’ also work for matplotlib; ‘plty’ or ‘p’ also work for plotly. Not case sensitive.
-Directory in which to save figures
-Suffix to add to end of figure filename(s), if save_dir is used
-Whether to show legend in plot
-Whether to show plot
-Whether to close figures before plotting
-Whether to clear figures before plotting
-Keyword arguments for matplotlib.pyplot
-Returns figure and axis matplotlib.pyplot objects if return_fig=True, otherwise, simply plots the figures
-Function to plot a stream of data with Z, E, N components using matplotlib. Similar to obspy.Stream.Plot(), but will be formatted differently and eventually more customizable. -This is also used in various functions throughout the package.
-Obspy stream of data with Z, E, N components
-Data object with parameters relevant for creating plot
-Optional: if not None, matplotlib.Figure in which to plot the resulting figure (i.e., can be plotted in existing figure)
-Optional: if not None, matplotlib.Axis in which to plot the resulting figure (i.e., can be plotted in existing axis)
-Whether to do matplotlib.pylot.show(), by default False
-Optional: the standard deviation of the data at which to clip the chart, by default 0.75
-Optional: whether to return the figure, by default True
-Tuple containing the figure and axes of the resulting plot, only returned if return_fig = True
-Process the input data and get HVSR data
-This is the main function that uses other (private) functions to do -the bulk of processing of the HVSR data and the data quality checks.
-Data object containing all the parameters input and generated by the user (usually, during sprit.input_params(), sprit.fetch_data(), sprit.generate_ppsds() and/or sprit.remove_noise()).
-(not used)
‘Diffuse field assumption’ H = √( (eie_E + eie_N) / eie_Z), eie = equal interval energy
‘Arithmetic Mean’ H ≡ (HN + HE)/2
‘Geometric Mean’ H ≡ √(HN · HE), recommended by the SESAME project (2004)
‘Vector Summation’ H ≡ √(HN^2 + HE^2)
‘Quadratic Mean’ H ≡ √((HN^2 + HE^2)/2)
‘Maximum Horizontal Value’ H ≡ max {HN, HE}
‘Minimum Horizontal Value’ H ≡ min {HN, HE}
‘Single Azimuth’ H = H2·cos(az) + H1·sin(az)
If True, default to smooth H/V curve using a Savitzky-Golay (savgol) filter with window length of 51 (works well with default resample of 1000 pts) -If int, the length of the window in the savgol filter.
-The Konno & Ohmachi method uses the obspy.signal.konnoohmachismoothing.konno_ohmachi_smoothing() function: https://docs.obspy.org/packages/autogen/obspy.signal.konnoohmachismoothing.konno_ohmachi_smoothing.html
The constant method uses a window of constant length f_smooth_width
The proportional method uses a window the percentage length of the frequency steps/range (f_smooth_width now refers to percentage)
See here for more information: https://www.geopsy.org/documentation/geopsy/hv-processing.html
-For ‘konno ohmachi’: passed directly to the bandwidth parameter of the konno_ohmachi_smoothing() function, determines the width of the smoothing peak, with lower values resulting in broader peak. Must be > 0.
For ‘constant’: the size of a triangular smoothing window in the number of frequency steps
For ‘proportional’: the size of a triangular smoothing window in percentage of the number of frequency steps (e.g., if 1000 frequency steps/bins and f_smooth_width=40, window would be 400 steps wide)
If True, default to resample H/V data to include 1000 frequency values for the rest of the analysis -If int, the number of data points to interpolate/resample/smooth the component psd/HV curve data to.
-If False, outlier curve removal is not carried out here. -If True, defaults to 98 (98th percentile). -Otherwise, float of percentile used as rmse_thresh of remove_outlier_curve().
-The azimuth angle to use when method is single azimuth.
-Whether to print output to terminal
-Dictionary containing all the information about the data, including input parameters
-Function to read data from tromino. Specifically, this has been lightly tested on Tromino 3G+ machines
-The input parameter _datapath_ from sprit.input_params()
-The parameters as read in from input_params() and and fetch_data()
-Whether to print results to terminal, by default False
-An obspy.Stream object containing the trace data from the Tromino instrument
-Function to remove noisy windows from data, using various methods.
-Methods include -- Manual window selection (by clicking on a chart with spectrogram and stream data), -- Auto window selection, which does the following two in sequence (these can also be done independently):
----
-- -
A sta/lta “antitrigger” method (using stalta values to automatically remove triggered windows where there appears to be too much noise)
- -
A noise threshold method, that cuts off all times where the noise threshold equals more than (by default) 80% of the highest amplitude noise sample for the length specified by lta (in seconds)
- -
A saturation threshold method, that cuts off all times where the noise threshold equals more than (by default) 99.5% of the highest amplitude noise sample.
Dictionary containing all the data and parameters for the HVSR analysis
-The different methods for removing noise from the dataset. A list of strings will also work, in which case, it should be a list of the above strings. See descriptions above for how each method works. By default ‘auto.’ -If remove_method=’auto’, this is the equivalent of remove_method=[‘noise threshold’, ‘antitrigger’, ‘saturation threshold’, ‘warm_cool’]
-A list/tuple of two items [s, e] or a list/tuple of two-item lists/tuples [[s0, e0], [s1,e1],…[sn, en]] with start and end time(s) for windows to keep for processing. -Data outside of these times will be excluded from processing. -Times should be obspy.UTCDateTime objects to ensure precision, but time strings (“13:05”) will also work in most cases (exceptions may be when the data stream starts/ends on different UTC days)
-Percentage (between 0 and 1), to use as the threshold at which to remove data. This is used in the saturation method. By default 0.995. -If a value is passed that is greater than 1, it will be divided by 100 to obtain the percentage.
-Percentage (between 0 and 1), to use as the threshold at which to remove data, if it persists for longer than time (in seconds (specified by min_win_size)). This is used in the noise threshold method. By default 0.8. -If a value is passed that is greater than 1, it will be divided by 100 to obtain the percentage.
-Short term average (STA) window (in seconds), by default 2. For use with sta/lta antitrigger method.
-Long term average (LTA) window (in seconds), by default 30. For use with sta/lta antitrigger method.
-Two-item list or tuple with the thresholds for the stalta antitrigger. The first value (index [0]) is the lower threshold, the second value (index [1] is the upper threshold), by default [0.5,5]
-Time in seconds to allow for warmup of the instrument (or while operator is still near instrument). This will remove any data before this time, by default 0.
-Time in seconds to allow for cooldown of the instrument (or for when operator is nearing instrument). This will remove any data before this time, by default 0.
-The minimum size a window must be over specified threshold (in seconds) for it to be removed
-If remove_raw_noise=True, will perform operation on raw data (‘input_stream’), rather than potentially already-modified data (‘stream’).
-Whether to print status of remove_noise
-Dictionary similar to hvsr_data, but containing modified data with ‘noise’ removed
-Function used to remove outlier curves using Root Mean Square Error to calculate the error of each windowed -Probabilistic Power Spectral Density (PPSD) curve against the median PPSD value at each frequency step for all times. -It calculates the RMSE for the PPSD curves of each component individually. All curves are removed from analysis.
-Some aberrant curves often occur due to the remove_noise() function, so this should be run some time after remove_noise(). -In general, the recommended workflow is to run this immediately following the generate_ppsds() function.
-Input dictionary containing all the values and parameters of interest
-The Root Mean Square Error value to use as a threshold for determining whether a curve is an outlier. -This averages over each individual entire curve so that curves with very aberrant data (often occurs when using the remove_noise() method), can be identified. -Otherwise, specify a float or integer to use as the cutoff RMSE value (all curves with RMSE above will be removed)
-Whether rmse_thresh should be interpreted as a raw RMSE value or as a percentile of the RMSE values.
-Whether to use the calculated HV Curve or the individual components. This can only be True after process_hvsr() has been run.
-Whether to show a plot of the removed data
-Whether to print output of function to terminal
-Input dictionary with values modified based on work of function.
-The sprit.run() is the main function that allows you to do all your HVSR processing in one simple step (sprit.run() is how you would call it in your code, but it may also be called using sprit.sprit_hvsr.run())
-The input_data parameter of sprit.run() is the only required parameter. This can be either a single file, a list of files (one for each component, for example), a directory (in which case, all obspy-readable files will be added to an HVSRBatch instance), a Rasp. Shake raw data directory, or sample data.
-Filepath to data to be processed. This may be a file or directory, depending on what kind of data is being processed (this can be specified with the source parameter). -For sample data, The following can be specified as the input_data parameter:
----
-- -
Any integer 1-6 (inclusive), or the string (e.g., input_data=”1” or input_data=1 will work)
- -
The word “sample” before any integer (e.g., input_data=”sample1”)
- -
The word “sample” will default to “sample1” if source=’file’.
- -
If source=’batch’, input_data should be input_data=’sample’ or input_data=’batch’. In this case, it will read and process all the sample files using the HVSRBatch class. Set verbose=True to see all the information in the sample batch csv file.
_description_, by default ‘file’
-Whether to perform azimuthal analysis, by default False.
-Whether to remove noise (before processing PPSDs)
-Whether to remove outlier curves from HVSR time windows
-Whether to show plots. This does not affect whether the plots are created (and then inserted as an attribute of HVSRData), only whether they are shown.
-_description_, by default False
-Keyword arguments for the functions listed above. The keyword arguments are unique, so they will get parsed out and passed into the appropriate function.
-Function for designating input parameters for reading in and processing data -See API documentation: [input_params()](https://sprit.readthedocs.io/en/latest/sprit.html#sprit.input_params)
-See API documentation at link above or at help(sprit.input_params) for specifics.
-See API documentation at link above or at help(sprit.input_params) for specifics.
-See API documentation at link above or at help(input_params) for specifics.
-See API documentation at link above or at help(sprit.input_params) for specifics.
-See API documentation at link above or at help(sprit.input_params) for specifics.
-See API documentation at link above or at help(sprit.input_params) for specifics.
-See API documentation at link above or at help(input_params) for specifics.
-See API documentation at link above or at help(sprit.input_params) for specifics.
-See API documentation at link above or at help(input_params) for specifics.
-See API documentation at link above or at help(input_params) for specifics.
-See API documentation at link above or at help(sprit.input_params) for specifics.
-See API documentation at link above or at help(input_params) for specifics.
-See API documentation at link above or at help(input_params) for specifics.
-See API documentation at link above or at help(input_params) for specifics.
-See API documentation at link above or at help(input_params) for specifics.
-See API documentation at link above or at help(input_params) for specifics.
-See API documentation at link above or at help(sprit.input_params) for specifics.
-See API documentation at link above or at help(input_params) for specifics.
-See API documentation at link above or at help(sprit.input_params) for specifics.
-See API documentation at link above or at help(input_params) for specifics.
-See API documentation at link above or at help(input_params) for specifics.
-See API documentation at link above or at help(input_params) for specifics.
-See API documentation at link above or at help(input_params) for specifics.
-See API documentation at link above or at help(input_params) for specifics.
-Fetch ambient seismic data from a source to read into obspy stream -See API documentation: [fetch_data()](https://sprit.readthedocs.io/en/latest/sprit.html#sprit.fetch_data)
-See API documentation at link above or at help(sprit.fetch_data) for specifics.
-See API documentation at link above or at help(sprit.fetch_data) for specifics.
-See API documentation at link above or at help(fetch_data) for specifics.
-See API documentation at link above or at help(sprit.fetch_data) for specifics.
-See API documentation at link above or at help(sprit.fetch_data) for specifics.
-See API documentation at link above or at help(fetch_data) for specifics.
-See API documentation at link above or at help(fetch_data) for specifics.
-See API documentation at link above or at help(fetch_data) for specifics.
-See API documentation at link above or at help(sprit.fetch_data) for specifics.
-See API documentation at link above or at help(fetch_data) for specifics.
-See API documentation at link above or at help(fetch_data) for specifics.
-See API documentation at link above or at help(fetch_data) for specifics.
-Function to calculate azimuthal horizontal component at specified angle(s). Adds each new horizontal -See API documentation: [calculate_azimuth()](https://sprit.readthedocs.io/en/latest/sprit.html#sprit.calculate_azimuth)
-See API documentation at link above or at help(sprit.calculate_azimuth) for specifics.
-See API documentation at link above or at help(calculate_azimuth) for specifics.
-See API documentation at link above or at help(sprit.calculate_azimuth) for specifics.
-See API documentation at link above or at help(sprit.calculate_azimuth) for specifics.
-See API documentation at link above or at help(calculate_azimuth) for specifics.
-See API documentation at link above or at help(calculate_azimuth) for specifics.
-See API documentation at link above or at help(calculate_azimuth) for specifics.
-Function to remove noisy windows from data, using various methods. -See API documentation: [remove_noise()](https://sprit.readthedocs.io/en/latest/sprit.html#sprit.remove_noise)
-See API documentation at link above or at help(sprit.remove_noise) for specifics.
-See API documentation at link above or at help(remove_noise) for specifics.
-See API documentation at link above or at help(remove_noise) for specifics.
-See API documentation at link above or at help(remove_noise) for specifics.
-See API documentation at link above or at help(remove_noise) for specifics.
-See API documentation at link above or at help(remove_noise) for specifics.
-See API documentation at link above or at help(remove_noise) for specifics.
-See API documentation at link above or at help(remove_noise) for specifics.
-See API documentation at link above or at help(remove_noise) for specifics.
-See API documentation at link above or at help(remove_noise) for specifics.
-See API documentation at link above or at help(remove_noise) for specifics.
-See API documentation at link above or at help(remove_noise) for specifics.
-See API documentation at link above or at help(remove_noise) for specifics.
-See API documentation at link above or at help(remove_noise) for specifics.
-Generates PPSDs for each channel -See API documentation: [generate_ppsds()](https://sprit.readthedocs.io/en/latest/sprit.html#sprit.generate_ppsds)
-See API documentation at link above or at help(sprit.generate_ppsds) for specifics.
-See API documentation at link above or at help(generate_ppsds) for specifics.
-See API documentation at link above or at help(generate_ppsds) for specifics.
-See API documentation at link above or at help(generate_ppsds) for specifics.
-Process the input data and get HVSR data -See API documentation: [process_hvsr()](https://sprit.readthedocs.io/en/latest/sprit.html#sprit.process_hvsr)
-See API documentation at link above or at help(sprit.process_hvsr) for specifics.
-See API documentation at link above or at help(process_hvsr) for specifics.
-See API documentation at link above or at help(process_hvsr) for specifics.
-See API documentation at link above or at help(sprit.process_hvsr) for specifics.
-See API documentation at link above or at help(process_hvsr) for specifics.
-See API documentation at link above or at help(process_hvsr) for specifics.
-See API documentation at link above or at help(process_hvsr) for specifics.
-See API documentation at link above or at help(process_hvsr) for specifics.
-See API documentation at link above or at help(process_hvsr) for specifics.
-Function used to remove outliers curves using Root Mean Square Error to calculate the error of each -See API documentation: [remove_outlier_curves()](https://sprit.readthedocs.io/en/latest/sprit.html#sprit.remove_outlier_curves)
-See API documentation at link above or at help(sprit.remove_outlier_curves) for specifics.
-See API documentation at link above or at help(remove_outlier_curves) for specifics.
-See API documentation at link above or at help(remove_outlier_curves) for specifics.
-See API documentation at link above or at help(remove_outlier_curves) for specifics.
-See API documentation at link above or at help(sprit.remove_outlier_curves) for specifics.
-See API documentation at link above or at help(remove_outlier_curves) for specifics.
-See API documentation at link above or at help(remove_outlier_curves) for specifics.
-Function to run tests on HVSR peaks to find best one and see if it passes quality checks -See API documentation: [check_peaks()](https://sprit.readthedocs.io/en/latest/sprit.html#sprit.check_peaks)
-See API documentation at link above or at help(sprit.check_peaks) for specifics.
-See API documentation at link above or at help(check_peaks) for specifics.
-See API documentation at link above or at help(sprit.check_peaks) for specifics.
-See API documentation at link above or at help(check_peaks) for specifics.
-See API documentation at link above or at help(sprit.check_peaks) for specifics.
-See API documentation at link above or at help(check_peaks) for specifics.
-Generate and/or print and/or export a report of the HVSR analysis in a variety of formats. -See API documentation: [get_report()](https://sprit.readthedocs.io/en/latest/sprit.html#sprit.get_report)
-See API documentation at link above or at help(sprit.get_report) for specifics.
-See API documentation at link above or at help(get_report) for specifics.
-See API documentation at link above or at help(sprit.get_report) for specifics.
-See API documentation at link above or at help(sprit.get_report) for specifics.
-See API documentation at link above or at help(sprit.get_report) for specifics.
-See API documentation at link above or at help(get_report) for specifics.
-See API documentation at link above or at help(get_report) for specifics.
-See API documentation at link above or at help(get_report) for specifics.
-See API documentation at link above or at help(get_report) for specifics.
-See API documentation at link above or at help(get_report) for specifics.
-See API documentation at link above or at help(get_report) for specifics.
-See API documentation at link above or at help(get_report) for specifics.
-See API documentation at link above or at help(sprit.get_report) for specifics.
-See API documentation at link above or at help(get_report) for specifics.
-See API documentation at link above or at help(get_report) for specifics.
-See API documentation at link above or at help(get_report) for specifics.
-See API documentation at link above or at help(get_report) for specifics.
-Export data into pickle format that can be read back in using import_data() so data does not need to -See API documentation: [export_data()](https://sprit.readthedocs.io/en/latest/sprit.html#sprit.export_data)
-See API documentation at link above or at help(sprit.export_data) for specifics.
-See API documentation at link above or at help(export_data) for specifics.
-See API documentation at link above or at help(sprit.export_data) for specifics.
-See API documentation at link above or at help(export_data) for specifics.
-If a single file/data point is being processed, a HVSRData object will be returned. Otherwise, it will be a HVSRBatch object. See their documention for more information.
-If the input parameter may not be read correctly. This is raised if the input_params() function fails. This raises an error since no other data processing or reading steps will be able to be carried out correctly.
-If the data is not read/fetched correctly using fetch_data(), an error will be raised. This is raised if the fetch_data() function fails. This raises an error since no other data processing steps will be able to be carried out correctly.
-If the data being processed is a single file, an error will be raised if generate_ppsds() does not work correctly. No errors are raised for remove_noise() errors (since that is an optional step) and the process_hvsr() step (since that is the last processing step) .
-Notes
-The sprit.run() function calls the following functions. This is the recommended order/set of functions to run to process HVSR using SpRIT. See the API documentation for these functions for more information: -- input_params(): The input_data parameter of input_params() is the only required variable, though others may also need to be called for your data to process correctly. -- fetch_data(): the source parameter of fetch_data() is the only explicit variable in the sprit.run() function aside from input_data and verbose. Everything else gets delivered to the correct function via the kwargs dictionary -- remove_noise(): by default, the kind of noise removal is remove_method=’auto’. See the remove_noise() documentation for more information. If remove_method is set to anything other than one of the explicit options in remove_noise, noise removal will not be carried out. -- generate_ppsds(): generates ppsds for each component, which will be combined/used later. Any parameter of obspy.signal.spectral_estimation.PPSD() may also be read into this function. -- remove_outlier_curves(): removes any outlier ppsd curves so that the data quality for when curves are combined will be enhanced. See the remove_outlier_curves() documentation for more information. -- process_hvsr(): this is the main function processing the hvsr curve and statistics. See process_hvsr() documentation for more details. The hvsr_band parameter sets the frequency spectrum over which these calculations occur. -- check_peaks(): this is the main function that will find and ‘score’ peaks to get a best peak. The parameter peak_freq_range can be set to limit the frequencies within which peaks are checked and scored. -- get_report(): this is the main function that will print, plot, and/or save the results of the data. See the get_report() API documentation for more information. -- export_data(): this function exports the final data output as a pickle file (by default, this pickle object has a .hvsr extension). 
This can be used to read data back into SpRIT without having to reprocess data.
-This script contains all the functions, classes, etc. to create a tkinter app for graphical user interface.
-Bases: object
Methods
-
|
-- |
- | - |
create_menubar |
-- |
create_tabs |
-- |
The default Windows terminal is not able to display the check mark character correctly. -This function returns another displayable character if platform is Windows
-Check time steps of PPSDS to make sure they are all the same length
-Check x_values of PPSDS to make sure they are all the same length
-Support function to check if a filepath is a pathlib.Path object and tries to convert if not
-Filepath to check. If not a valid filepath, will not convert and raises error
-pathlib.Path of filepath
-Private function to format time, used in other functions
-Formats input time to datetime objects in utc
-Input datetime. Can include date and time, just date (time inferred to be 00:00:00.00) or just time (if so, date is set as today)
-If string and not utc, assumed to be timezone of computer running the process. -If int, assumed to be offset from UTC (e.g., CST in the United States is -6; CDT in the United States is -5)
-Output datetime.datetime object, now in UTC time.
def export_settings(hvsr_data, export_settings_path='default', export_settings_type='all', include_location=False, verbose=True):
    """Save instrument and/or processing settings to JSON file(s).

    Parameters
    ----------
    hvsr_data : HVSRData or dict
        Data object containing the instrument parameters and (for processing
        settings) the 'processing_parameters' entry to be exported.
    export_settings_path : str, default="default"
        Where to save the json file(s) containing the settings, by default 'default'.
        If "default," will save to sprit package resources. Otherwise, set a filepath location you would like for it to be saved to.
        If 'all' is selected, a directory should be supplied.
        Otherwise, it will save in the directory of the provided file, if it exists. Otherwise, defaults to the home directory.
    export_settings_type : str, {'all', 'instrument', 'processing'}
        What kind of settings to save.
        If 'all', saves all possible types in their respective json files.
        If 'instrument', save the instrument settings to their respective file.
        If 'processing', saves the processing settings to their respective file. By default 'all'
    include_location : bool, default=False
        Whether to include the location parameters in the exported settings document.
        This includes xcoord, ycoord, elevation, elev_unit, and input_crs.
    verbose : bool, default=True
        Whether to print outputs and information to the terminal
    """
    def _format_json(settings_dict, extra_formatting):
        # Serialize settings and reformat the raw json string for readability
        # (keeps list items on one line; processing settings get extra nesting).
        jsonString = json.dumps(settings_dict, indent=2)
        jsonString = jsonString.replace('\n ', ' ')
        jsonString = jsonString.replace('[ ', '[')
        jsonString = jsonString.replace('\n ]', ']')
        if extra_formatting:
            jsonString = jsonString.replace('\n },', '\n\t\t},\n')
            jsonString = jsonString.replace('{ "', '\n\t\t{\n\t\t"')
            jsonString = jsonString.replace(', "', ',\n\t\t"')
            jsonString = jsonString.replace('\n }', '\n\t\t}')
            jsonString = jsonString.replace(': {', ':\n\t\t\t{')
        return jsonString

    def _write_settings(file_path, suffix, settings_dict, extra_formatting):
        # Write formatted settings JSON to file_path; if that fails (e.g., the
        # location is not writable), fall back to the user's home directory.
        # Returns the path actually written and the json string (for verbose output).
        jsonString = _format_json(settings_dict, extra_formatting)
        try:
            with open(file_path.with_suffix(suffix).as_posix(), 'w') as settings_file:
                settings_file.write(jsonString)
        except Exception:
            file_path = pathlib.Path.home().joinpath(file_path.name)
            with open(file_path.with_suffix(suffix).as_posix(), 'w') as settings_file:
                settings_file.write(jsonString)
        return file_path, jsonString

    fnameDict = {}
    fnameDict['instrument'] = "instrument_settings.json"
    fnameDict['processing'] = "processing_settings.json"

    if export_settings_path == 'default' or export_settings_path is True:
        settingsPath = resource_dir.joinpath('settings')
    else:
        export_settings_path = pathlib.Path(export_settings_path)
        if not export_settings_path.exists():
            if not export_settings_path.parent.exists():
                print(f'The provided value for export_settings_path ({export_settings_path}) does not exist. Saving settings to the home directory: {pathlib.Path.home()}')
                settingsPath = pathlib.Path.home()
            else:
                settingsPath = export_settings_path.parent

        if export_settings_path.is_dir():
            settingsPath = export_settings_path
        elif export_settings_path.is_file():
            settingsPath = export_settings_path.parent
            # Prefix output filenames with the provided file's name
            fnameDict['instrument'] = export_settings_path.name + "_instrumentSettings.json"
            fnameDict['processing'] = export_settings_path.name + "_processingSettings.json"

    # Get final filepaths
    instSetFPath = settingsPath.joinpath(fnameDict['instrument'])
    procSetFPath = settingsPath.joinpath(fnameDict['processing'])

    # Get settings values
    instKeys = ["instrument", "net", "sta", "loc", "cha", "depth", "metapath", "hvsr_band"]
    inst_location_keys = ['xcoord', 'ycoord', 'elevation', 'elev_unit', 'input_crs']
    procFuncs = [fetch_data, remove_noise, generate_ppsds, process_hvsr, check_peaks, get_report]

    instrument_settings_dict = {}
    keys_to_export = list(instKeys)
    if include_location:
        keys_to_export += inst_location_keys
    for k in keys_to_export:
        value = hvsr_data[k]
        # Paths cannot be serialized to JSON directly; store posix strings
        if isinstance(value, pathlib.PurePath):
            value = value.as_posix()
        instrument_settings_dict[k] = value

    processing_settings_dict = {}
    for func in procFuncs:
        funcName = func.__name__
        processing_settings_dict[funcName] = {}
        for arg in hvsr_data['processing_parameters'][funcName]:
            argVal = hvsr_data['processing_parameters'][funcName][arg]
            # Skip data objects; only plain parameter values are serializable
            if not isinstance(argVal, (HVSRBatch, HVSRData)):
                processing_settings_dict[funcName][arg] = argVal

    if verbose:
        print("Exporting Settings")

    # Save settings files
    export_type = export_settings_type.lower()
    if export_type == 'instrument' or export_type == 'all':
        instSetFPath, jsonString = _write_settings(instSetFPath, '.inst', instrument_settings_dict, extra_formatting=False)
        if verbose:
            print(f"Instrument settings exported to {instSetFPath}")
            print(f"{jsonString}")
            print()
    if export_type == 'processing' or export_type == 'all':
        procSetFPath, jsonString = _write_settings(procSetFPath, '.proc', processing_settings_dict, extra_formatting=True)
        if verbose:
            print(f"Processing settings exported to {procSetFPath}")
            print(f"{jsonString}")
            print()
-
-def fetch_data(params, source='file', trim_dir=None, export_format='mseed', detrend='spline', detrend_order=2, update_metadata=True, plot_input_stream=False, verbose=False, **kwargs)
-
Fetch ambient seismic data from a source to read into obspy stream
-trim_dir
: None
or str
or pathlib obj
, default=None
export_format
: str='mseed'
detrend
: str
or bool
, default='spline'
detrend_order
: int
, default=2
update_metadata
: bool
, default=True
plot_input_stream
: bool
, default=False
verbose
: bool
, default=False
**kwargs
def fetch_data(params, source='file', trim_dir=None, export_format='mseed', detrend='spline', detrend_order=2, update_metadata=True, plot_input_stream=False, verbose=False, **kwargs):
- """Fetch ambient seismic data from a source to read into obspy stream
-
- Parameters
- ----------
- params : dict
- Dictionary containing all the necessary params to get data.
- Parameters defined using input_params() function.
- source : str, {'raw', 'dir', 'file', 'batch'}
- String indicating where/how data file was created. For example, if raw data, will need to find correct channels.
- 'raw' finds raspberry shake data, from raw output copied using scp directly from Raspberry Shake, either in folder or subfolders;
- 'dir' is used if the day's 3 component files (currently Raspberry Shake supported only) are all 3 contained in a directory by themselves.
- 'file' is used if the params['datapath'] specified in input_params() is the direct filepath to a single file to be read directly into an obspy stream.
- 'batch' is used to read a list or specified set of seismic files.
- Most commonly, a csv file can be read in with all the parameters. Each row in the csv is a separate file. Columns can be arranged by parameter.
- trim_dir : None or str or pathlib obj, default=None
- If None (or False), data is not trimmed in this function.
- Otherwise, this is the directory to save trimmed and exported data.
- export_format: str='mseed'
- If trim_dir is not None, this is the format in which to save the data
- detrend : str or bool, default='spline'
- If False, data is not detrended.
- Otherwise, this should be a string accepted by the type parameter of the obspy.core.trace.Trace.detrend method: https://docs.obspy.org/packages/autogen/obspy.core.trace.Trace.detrend.html
- detrend_order : int, default=2
- If detrend parameter is 'spline' or 'polynomial', this is passed directly to the order parameter of obspy.core.trace.Trace.detrend method.
- update_metadata : bool, default=True
- Whether to update the metadata file, used primarily with Raspberry Shake data which uses a generic inventory file.
- plot_input_stream : bool, default=False
- Whether to plot the raw input stream. This plot includes a spectrogram (Z component) and the raw (with decimation for speed) plots of each component signal.
- verbose : bool, default=False
- Whether to print outputs and inputs to the terminal
- **kwargs
- Keywords arguments, primarily for 'batch' and 'dir' sources
-
- Returns
- -------
- params : HVSRData or HVSRBatch object
- Same as params parameter, but with an additional "stream" attribute with an obspy data stream with 3 traces: Z (vertical), N (North-south), and E (East-west)
- """
- # Get intput paramaters
- orig_args = locals().copy()
- start_time = datetime.datetime.now()
-
- # Update with processing parameters specified previously in input_params, if applicable
- if 'processing_parameters' in params.keys():
- if 'fetch_data' in params['processing_parameters'].keys():
- defaultVDict = dict(zip(inspect.getfullargspec(fetch_data).args[1:],
- inspect.getfullargspec(fetch_data).defaults))
- defaultVDict['kwargs'] = kwargs
- for k, v in params['processing_parameters']['fetch_data'].items():
- # Manual input to function overrides the imported parameter values
- if k!='params' and k in orig_args.keys() and orig_args[k]==defaultVDict[k]:
- orig_args[k] = v
-
- #Update local variables, in case of previously-specified parameters
- source=orig_args['source']
- trim_dir=orig_args['trim_dir']
- export_format=orig_args['export_format']
- detrend=orig_args['detrend']
- detrend_order=orig_args['detrend_order']
- update_metadata=orig_args['update_metadata']
- plot_input_stream=orig_args['plot_input_stream']
- verbose=orig_args['verbose']
- kwargs=orig_args['kwargs']
-
- if source != 'batch' and verbose:
- print('\nFetching data (fetch_data())')
- print()
-
- params = get_metadata(params, update_metadata=update_metadata, source=source)
- inv = params['inv']
- date=params['acq_date']
-
- #Cleanup for gui input
- if isinstance(params['datapath'], (obspy.Stream, obspy.Trace)):
- pass
- elif '}' in str(params['datapath']):
- params['datapath'] = params['datapath'].as_posix().replace('{','')
- params['datapath'] = params['datapath'].split('}')
-
- sampleListNos = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10']
- sampleList = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'batch', 'sample', 'sample_batch']
- for s in sampleListNos:
- sampleList.append(f'sample{s}')
- sampleList.append(f'sample_{s}')
-
- #Make sure datapath is pointing to an actual file
- if isinstance(params['datapath'],list):
- for i, d in enumerate(params['datapath']):
- params['datapath'][i] = sprit_utils.checkifpath(str(d).strip(), sample_list=sampleList)
- dPath = params['datapath']
- elif isinstance(params['datapath'], (obspy.Stream, obspy.Trace)):
- pass
- else:
- dPath = sprit_utils.checkifpath(params['datapath'], sample_list=sampleList)
-
- inst = params['instrument']
-
- #Need to put dates and times in right formats first
- if type(date) is datetime.datetime:
- doy = date.timetuple().tm_yday
- year = date.year
- elif type(date) is datetime.date:
- date = datetime.datetime.combine(date, datetime.time(hour=0, minute=0, second=0))
- doy = date.timetuple().tm_yday
- year = date.year
- elif type(date) is tuple:
- if date[0]>366:
- raise ValueError('First item in date tuple must be day of year (0-366)', 0)
- elif date[1] > datetime.datetime.now().year:
- raise ValueError('Second item in date tuple should be year, but given item is in the future', 0)
- else:
- doy = date[0]
- year = date[1]
- elif type(date) is str:
- if '/' in date:
- dateSplit = date.split('/')
- elif '-' in date:
- dateSplit = date.split('-')
- else:
- dateSplit = date
-
- if int(dateSplit[0]) > 31:
- date = datetime.datetime(int(dateSplit[0]), int(dateSplit[1]), int(dateSplit[2]))
- doy = date.timetuple().tm_yday
- year = date.year
- elif int(dateSplit[0])<=12 and int(dateSplit[2]) > 31:
- warnings.warn("Preferred date format is 'yyyy-mm-dd' or 'yyyy/mm/dd'. Will attempt to parse date.")
- date = datetime.datetime(int(dateSplit[2]), int(dateSplit[0]), int(dateSplit[1]))
- doy = date.timetuple().tm_yday
- year = date.year
- else:
- warnings.warn("Preferred date format is 'yyyy-mm-dd' or 'yyyy/mm/dd'. Cannot parse date.")
- elif type(date) is int:
- doy = date
- year = datetime.datetime.today().year
- else: #FOR NOW, need to update
- date = datetime.datetime.now()
- doy = date.timetuple().tm_yday
- year = date.year
- warnings.warn("Did not recognize date, using year {} and day {}".format(year, doy))
-
- #Select which instrument we are reading from (requires different processes for each instrument)
- raspShakeInstNameList = ['raspberry shake', 'shake', 'raspberry', 'rs', 'rs3d', 'rasp. shake', 'raspshake']
- trominoNameList = ['tromino', 'trom', 'tromino 3g', 'tromino 3g+', 'tr', 't']
-
- #Get any kwargs that are included in obspy.read
- obspyReadKwargs = {}
- for argName in inspect.getfullargspec(obspy.read)[0]:
- if argName in kwargs.keys():
- obspyReadKwargs[argName] = kwargs[argName]
-
- #Select how reading will be done
- if source=='raw':
- try:
- if inst.lower() in raspShakeInstNameList:
- rawDataIN = __read_RS_file_struct(dPath, source, year, doy, inv, params, verbose=verbose)
-
- elif inst.lower() in trominoNameList:
- rawDataIN = read_tromino_files(dPath, params, verbose=verbose, **kwargs)
- except:
- raise RuntimeError(f"Data not fetched for {params['site']}. Check input parameters or the data file.")
- elif source=='stream' or isinstance(params, (obspy.Stream, obspy.Trace)):
- rawDataIN = params['datapath'].copy()
- elif source=='dir':
- if inst.lower() in raspShakeInstNameList:
- rawDataIN = __read_RS_file_struct(dPath, source, year, doy, inv, params, verbose=verbose)
- else:
- obspyFiles = {}
- for obForm in obspyFormats:
- temp_file_glob = pathlib.Path(dPath.as_posix().lower()).glob('.'+obForm.lower())
- for f in temp_file_glob:
- currParams = params
- currParams['datapath'] = f
-
- curr_data = fetch_data(params, source='file', #all the same as input, except just reading the one file using the source='file'
- trim_dir=trim_dir, export_format=export_format, detrend=detrend, detrend_order=detrend_order, update_metadata=update_metadata, verbose=verbose, **kwargs)
- curr_data.merge()
- obspyFiles[f.stem] = curr_data #Add path object to dict, with filepath's stem as the site name
- return HVSRBatch(obspyFiles)
- elif source=='file' and str(params['datapath']).lower() not in sampleList:
- if isinstance(dPath, list) or isinstance(dPath, tuple):
- rawStreams = []
- for datafile in dPath:
- rawStream = obspy.read(datafile, **obspyReadKwargs)
- rawStreams.append(rawStream) #These are actually streams, not traces
- for i, stream in enumerate(rawStreams):
- if i == 0:
- rawDataIN = obspy.Stream(stream) #Just in case
- else:
- rawDataIN = rawDataIN + stream #This adds a stream/trace to the current stream object
- elif str(dPath)[:6].lower()=='sample':
- pass
- else:
- rawDataIN = obspy.read(dPath, **obspyReadKwargs)#, starttime=obspy.core.UTCDateTime(params['starttime']), endttime=obspy.core.UTCDateTime(params['endtime']), nearest_sample =True)
- import warnings
- with warnings.catch_warnings():
- warnings.simplefilter(action='ignore', category=UserWarning)
- rawDataIN.attach_response(inv)
- elif source=='batch' and str(params['datapath']).lower() not in sampleList:
- if verbose:
- print('\nFetching data (fetch_data())')
- batch_data_read_kwargs = {k: v for k, v in locals()['kwargs'].items() if k in batch_data_read.__code__.co_varnames}
- params = batch_data_read(input_data=params['datapath'], verbose=verbose, **batch_data_read_kwargs)
- params = HVSRBatch(params)
- return params
- elif str(params['datapath']).lower() in sampleList or f"sample{params['datapath'].lower()}" in sampleList:
- sample_data_dir = pathlib.Path(pkg_resources.resource_filename(__name__, 'resources/sample_data/'))
- if source=='batch':
- params['datapath'] = sample_data_dir.joinpath('Batch_SampleData.csv')
- params = batch_data_read(input_data=params['datapath'], batch_type='sample', verbose=verbose)
- params = HVSRBatch(params)
- return params
-
- elif source=='dir':
- params['datapath'] = sample_data_dir.joinpath('Batch_SampleData.csv')
- params = batch_data_read(input_data=params['datapath'], batch_type='sample', verbose=verbose)
- params = HVSRBatch(params)
- return params
-
- elif source=='file':
- params['datapath'] = str(params['datapath']).lower()
-
- if params['datapath'].lower() in sampleFileKeyMap.keys():
- params['datapath'] = sampleFileKeyMap[params['datapath'].lower()]
- else:
- params['datapath'] = sample_data_dir.joinpath('SampleHVSRSite1_AM.RAC84.00.2023.046_2023-02-15_1704-1734.MSEED')
-
- dPath = params['datapath']
- rawDataIN = obspy.read(dPath)#, starttime=obspy.core.UTCDateTime(params['starttime']), endttime=obspy.core.UTCDateTime(params['endtime']), nearest_sample =True)
- import warnings
- with warnings.catch_warnings():
- warnings.simplefilter(action='ignore', category=UserWarning)
- rawDataIN.attach_response(inv)
- else:
- try:
- rawDataIN = obspy.read(dPath)
- rawDataIN.attach_response(inv)
- except:
- RuntimeError(f'source={source} not recognized, and datapath cannot be read using obspy.read()')
-
- #Get metadata from the data itself, if not reading raw data
- try:
- dataIN = rawDataIN.copy()
- if source!='raw':
- #Use metadata from file for;
- # site
- if params['site'] == "HVSR Site":
- if isinstance(dPath, (list, tuple)):
- dPath = dPath[0]
- params['site'] = dPath.stem
- params['params']['site'] = dPath.stem
-
- # network
- if str(params['net']) == 'AM':
- params['net'] = dataIN[0].stats.network
- params['params']['net'] = dataIN[0].stats.network
-
- # station
- if str(params['sta']) == 'RAC84':
- params['sta'] = dataIN[0].stats.station
- params['params']['sta'] = dataIN[0].stats.station
-
- # loc
- if str(params['loc']) == '00':
- params['loc'] = dataIN[0].stats.location
- params['params']['loc'] = dataIN[0].stats.location
-
- # channels
- channelList = []
- if str(params['cha']) == ['EHZ', 'EHN', 'EHE']:
- for tr in dataIN:
- if tr.stats.channel not in channelList:
- channelList.append(tr.stats.channel)
- channelList.sort(reverse=True) #Just so z is first, just in case
- params['cha'] = channelList
- params['params']['cha'] = channelList
-
- # Acquisition date
- if str(params['acq_date']) == str(datetime.datetime.now().date()):
- params['acq_date'] = dataIN[0].stats.starttime.date
-
- # starttime
- today_Starttime = obspy.UTCDateTime(datetime.datetime(year=datetime.date.today().year, month=datetime.date.today().month,
- day = datetime.date.today().day,
- hour=0, minute=0, second=0, microsecond=0))
- maxStarttime = datetime.datetime(year=params['acq_date'].year, month=params['acq_date'].month, day=params['acq_date'].day,
- hour=0, minute=0, second=0, microsecond=0, tzinfo=datetime.timezone.utc)
- if str(params['starttime']) == str(today_Starttime):
- for tr in dataIN.merge():
- currTime = datetime.datetime(year=tr.stats.starttime.year, month=tr.stats.starttime.month, day=tr.stats.starttime.day,
- hour=tr.stats.starttime.hour, minute=tr.stats.starttime.minute,
- second=tr.stats.starttime.second, microsecond=tr.stats.starttime.microsecond, tzinfo=datetime.timezone.utc)
- if currTime > maxStarttime:
- maxStarttime = currTime
-
- newStarttime = obspy.UTCDateTime(datetime.datetime(year=params['acq_date'].year, month=params['acq_date'].month,
- day = params['acq_date'].day,
- hour=maxStarttime.hour, minute=maxStarttime.minute,
- second=maxStarttime.second, microsecond=maxStarttime.microsecond))
- params['starttime'] = newStarttime
- params['params']['starttime'] = newStarttime
-
- # endttime
- today_Endtime = obspy.UTCDateTime(datetime.datetime(year=datetime.date.today().year, month=datetime.date.today().month,
- day = datetime.date.today().day,
- hour=23, minute=59, second=59, microsecond=999999))
- tomorrow_Endtime = today_Endtime + (60*60*24)
- minEndtime = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc)#(hour=23, minute=59, second=59, microsecond=999999)
- if str(params['endtime']) == str(today_Endtime) or str(params['endtime'])==tomorrow_Endtime:
- for tr in dataIN.merge():
- currTime = datetime.datetime(year=tr.stats.endtime.year, month=tr.stats.endtime.month, day=tr.stats.endtime.day,
- hour=tr.stats.endtime.hour, minute=tr.stats.endtime.minute,
- second=tr.stats.endtime.second, microsecond=tr.stats.endtime.microsecond, tzinfo=datetime.timezone.utc)
- if currTime < minEndtime:
- minEndtime = currTime
- newEndtime = obspy.UTCDateTime(datetime.datetime(year=minEndtime.year, month=minEndtime.month,
- day = minEndtime.day,
- hour=minEndtime.hour, minute=minEndtime.minute,
- second=minEndtime.second, microsecond=minEndtime.microsecond, tzinfo=datetime.timezone.utc))
- params['endtime'] = newEndtime
- params['params']['endtime'] = newEndtime
-
-
- #print(dataIN)
- #print(params['starttime'])
- #print(params['endtime'])
- dataIN = dataIN.split()
- dataIN = dataIN.trim(starttime=params['starttime'], endtime=params['endtime'])
- dataIN.merge()
- #print(dataIN)
- except:
- raise RuntimeError('Data not fetched. Check your input parameters or the data file.')
-
- #Trim and save data as specified
- if trim_dir=='None':
- trim_dir=None
- if not trim_dir:
- pass
- else:
- if isinstance(params, HVSRBatch):
- pass
- else:
- dataIN = _trim_data(input=params, stream=dataIN, export_dir=trim_dir, source=source, export_format=export_format)
-
- #Split data if masked array (if there are gaps)...detrending cannot be done without
- for tr in dataIN:
- if isinstance(tr.data, np.ma.masked_array):
- dataIN = dataIN.split()
- #Splits entire stream if any trace is masked_array
- break
-
- #Detrend data
- if isinstance(params, HVSRBatch):
- pass
- else:
- dataIN = __detrend_data(input=dataIN, detrend=detrend, detrend_order=detrend_order, verbose=verbose, source=source)
-
- #Remerge data
- dataIN = dataIN.merge(method=1)
-
- #Plot the input stream?
- if plot_input_stream:
- try:
- params['InputPlot'] = _plot_specgram_stream(stream=dataIN, params=params, component='Z', stack_type='linear', detrend='mean', dbscale=True, fill_gaps=None, ylimstd=3, return_fig=True, fig=None, ax=None, show_plot=False)
- #_get_removed_windows(input=dataIN, fig=params['InputPlot'][0], ax=params['InputPlot'][1], lineArtist =[], winArtist = [], existing_lineArtists=[], existing_xWindows=[], exist_win_format='matplotlib', keep_line_artists=True, time_type='matplotlib', show_plot=True)
- plt.show()
- except:
- print('Error with default plotting method, falling back to internal obspy plotting method')
- dataIN.plot(method='full', linewidth=0.25)
-
- #Sort channels (make sure Z is first, makes things easier later)
- if isinstance(params, HVSRBatch):
- pass
- else:
- dataIN = _sort_channels(input=dataIN, source=source, verbose=verbose)
-
- #Clean up the ends of the data unless explicitly specified to do otherwise (this is a kwarg, not a parameter)
- if 'clean_ends' not in kwargs.keys():
- clean_ends=True
- else:
- clean_ends = kwargs['clean_ends']
-
- if clean_ends:
- maxStarttime = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc) - datetime.timedelta(days=36500) #100 years ago
- minEndtime = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc)
-
- for tr in dataIN:
- currStarttime = datetime.datetime(year=tr.stats.starttime.year, month=tr.stats.starttime.month, day=tr.stats.starttime.day,
- hour=tr.stats.starttime.hour, minute=tr.stats.starttime.minute,
- second=tr.stats.starttime.second, microsecond=tr.stats.starttime.microsecond, tzinfo=datetime.timezone.utc)
- if currStarttime > maxStarttime:
- maxStarttime = currStarttime
-
- currEndtime = datetime.datetime(year=tr.stats.endtime.year, month=tr.stats.endtime.month, day=tr.stats.endtime.day,
- hour=tr.stats.endtime.hour, minute=tr.stats.endtime.minute,
- second=tr.stats.endtime.second, microsecond=tr.stats.endtime.microsecond, tzinfo=datetime.timezone.utc)
-
- if currEndtime < minEndtime:
- minEndtime = currEndtime
-
-
- maxStarttime = obspy.UTCDateTime(maxStarttime)
- minEndtime = obspy.UTCDateTime(minEndtime)
- dataIN = dataIN.split()
- for tr in dataIN:
- tr.trim(starttime=maxStarttime, endtime=minEndtime)
- pass
- dataIN.merge()
-
- params['batch'] = False #Set False by default, will get corrected later in batch mode
- params['input_stream'] = dataIN.copy()
- params['stream'] = dataIN.copy()
-
- if 'processing_parameters' not in params.keys():
- params['processing_parameters'] = {}
- params['processing_parameters']['fetch_data'] = {}
- for key, value in orig_args.items():
- params['processing_parameters']['fetch_data'][key] = value
-
-
- params['ProcessingStatus']['FetchDataStatus'] = True
- if verbose and not isinstance(params, HVSRBatch):
- dataINStr = dataIN.__str__().split('\n')
- for line in dataINStr:
- print('\t',line)
-
- params = _check_processing_status(params, start_time=start_time, func_name=inspect.stack()[0][3], verbose=verbose)
-
- return params
-include_location
: bool
, default=False, input CRS
verbose
: bool
, default=True
+def fetch_data(params, source='file', data_export_path=None, data_export_format='mseed', detrend='spline', detrend_order=2, update_metadata=True, plot_input_stream=False, plot_engine='matplotlib', show_plot=True, verbose=False, **kwargs)
+
Fetch ambient seismic data from a source to read into obspy stream
+data_export_path
: None
or str
or pathlib obj
, default=None
data_export_format
: str='mseed'
detrend
: str
or bool
, default='spline'
detrend_order
: int
, default=2
update_metadata
: bool
, default=True
plot_input_stream
: bool
, default=False
plot_engine
: str
, default='matplotlib'
verbose
: bool
, default=False
**kwargs
def format_time(inputDT, tzone='UTC')
@@ -4044,172 +322,6 @@ Returns
outputTimeObj
: datetime object in UTC
Output datetime.datetime object, now in UTC time.
def format_time(inputDT, tzone='UTC'):
- """Private function to format time, used in other functions
-
- Formats input time to datetime objects in utc
-
- Parameters
- ----------
- inputDT : str or datetime obj
- Input datetime. Can include date and time, just date (time inferred to be 00:00:00.00) or just time (if so, date is set as today)
- tzone : str='utc' or int {'utc', 'local'}
- Timezone of data entry.
- If string and not utc, assumed to be timezone of computer running the process.
- If int, assumed to be offset from UTC (e.g., CST in the United States is -6; CDT in the United States is -5)
-
- Returns
- -------
- outputTimeObj : datetime object in UTC
- Output datetime.datetime object, now in UTC time.
-
- """
- if type(inputDT) is str:
- #tzone = 'America/Chicago'
- #Format string to datetime obj
- div = '-'
- timeDiv = 'T'
- if "/" in inputDT:
- div = '/'
- hasDate = True
- elif '-' in inputDT:
- div = '-'
- hasDate = True
- else:
- hasDate= False
- year = datetime.datetime.today().year
- month = datetime.datetime.today().month
- day = datetime.datetime.today().day
-
- if ':' in inputDT:
- hasTime = True
- if 'T' in inputDT:
- timeDiv = 'T'
- else:
- timeDiv = ' '
- else:
- hasTime = False
-
- if hasDate:
- #If first number is 4-dig year (assumes yyyy-dd-mm is not possible)
- if len(inputDT.split(div)[0])>2:
- year = inputDT.split(div)[0]
- month = inputDT.split(div)[1]
- day = inputDT.split(div)[2].split(timeDiv)[0]
-
- #If last number is 4-dig year
- elif len(inputDT.split(div)[2].split(timeDiv)[0])>2:
- #..and first number is day
- if int(inputDT.split(div)[0])>12:
- #dateStr = '%d'+div+'%m'+div+'%Y'
- year = inputDT.split(div)[2].split(timeDiv)[0]
- month = inputDT.split(div)[1]
- day = inputDT.split(div)[0]
- #...and first number is month (like American style)
- else:
- year = inputDT.split(div)[2].split(timeDiv)[0]
- month = inputDT.split(div)[0]
- day = inputDT.split(div)[1]
-
- #Another way to catch if first number is (2-digit) year
- elif int(inputDT.split(div)[0])>31:
- #dateStr = '%y'+div+'%m'+div+'%d'
- year = inputDT.split(div)[0]
- #Assumes anything less than current year is from this century
- if year < datetime.datetime.today().year:
- year = '20'+year
- else:#...and anything more than current year is from last century
- year = '19'+year
- #assumes day will always come last in this instance, as above
- month = inputDT.split(div)[1]
- day = inputDT.split(div)[2].split(timeDiv)[0]
-
- #If last digit is (2 digit) year
- elif int(inputDT.split(div)[2].split(timeDiv)[0])>31:
- #...and first digit is day
- if int(inputDT.split(div)[0])>12:
- #dateStr = '%d'+div+'%m'+div+'%y'
- year = inputDT.split(div)[2].split(timeDiv)[0]
- if year < datetime.datetime.today().year:
- year = '20'+year
- else:
- year = '19'+year
- month = inputDT.split(div)[1]
- day = inputDT.split(div)[0]
- else: #...and second digit is day
- #dateStr = '%m'+div+'%d'+div+'%y'
- year = inputDT.split(div)[2].split(timeDiv)[0]
- if year < datetime.datetime.today().year:
- year = '20'+year
- else:
- year = '19'+year
- month = inputDT.split(div)[0]
- day = inputDT.split(div)[1]
-
- hour=0
- minute=0
- sec=0
- microS=0
- if hasTime:
- if hasDate:
- timeStr = inputDT.split(timeDiv)[1]
- else:
- timeStr = inputDT
-
- if 'T' in timeStr:
- timeStr=timeStr.split('T')[1]
- elif ' ' in timeStr:
- timeStr=timeStr.split(' ')[1]
-
- timeStrList = timeStr.split(':')
- if len(timeStrList[0])>2:
- timeStrList[0] = timeStrList[0][-2:]
- elif int(timeStrList[0]) > 23:
- timeStrList[0] = timeStrList[0][-1:]
-
- if len(timeStrList) == 3:
- if '.' in timeStrList[2]:
- microS = int(timeStrList[2].split('.')[1])
- timeStrList[2] = timeStrList[2].split('.')[0]
- elif len(timeStrList) == 2:
- timeStrList.append('00')
-
- hour = int(timeStrList[0])
- minute=int(timeStrList[1])
- sec = int(timeStrList[2])
-
- outputTimeObj = datetime.datetime(year=int(year),month=int(month), day=int(day),
- hour=int(hour), minute=int(minute), second=int(sec), microsecond=int(microS))
-
- elif type(inputDT) is datetime.datetime or type(inputDT) is datetime.time:
- outputTimeObj = inputDT
-
- #Add timezone info
- availableTimezones = list(map(str.lower, zoneinfo.available_timezones()))
- if outputTimeObj.tzinfo is not None and outputTimeObj.tzinfo.utcoffset(outputTimeObj) is not None:
- #This is already timezone aware
- pass
- elif type(tzone) is int:
- outputTimeObj = outputTimeObj-datetime.timedelta(hours=tzone)
- elif type(tzone) is str:
- if tzone.lower() in availableTimezones:
- outputTimeObj = outputTimeObj.replace(tzinfo=zoneinfo.ZoneInfo(tzone))
- else:
- raise ValueError("Timezone {} is not in official list. \nAvailable timezones:\n{}".format(tzone, availableTimezones))
- elif isinstance(tzone, zoneinfo.ZoneInfo):
- outputTimeObj = outputTimeObj.replace(tzinfo=tzone)
- else:
- raise ValueError("Timezone must be either str or int")
-
- #Convert to UTC
- outputTimeObj = outputTimeObj.astimezone(datetime.timezone.utc)
-
- return outputTimeObj
-
def generate_ppsds(hvsr_data, azimuthal_ppsds=False, verbose=False, **ppsd_kwargs)
@@ -4229,7 +341,7 @@ Parameters
**ppsd_kwargs
: dict
Dictionary with keyword arguments that are passed directly to obspy.signal.PPSD.
If the following keywords are not specified, their defaults are amended in this function from the obspy defaults for its PPSD function. Specifically:
-- ppsd_length defaults to 60 (seconds) here instead of 3600
+- ppsd_length defaults to 30 (seconds) here instead of 3600
- skip_on_gaps defaults to True instead of False
- period_step_octaves defaults to 0.03125 instead of 0.125
@@ -4237,307 +349,12 @@ Returns
ppsds : HVSRData object
Dictionary containing entries with ppsds for each channel
-
-
-Expand source code
-
-def generate_ppsds(hvsr_data, azimuthal_ppsds=False, verbose=False, **ppsd_kwargs):
- """Generates PPSDs for each channel
-
- Channels need to be in Z, N, E order
- Info on PPSD creation here: https://docs.obspy.org/packages/autogen/obspy.signal.spectral_estimation.PPSD.html
-
- Parameters
- ----------
- hvsr_data : dict, HVSRData object, or HVSRBatch object
- Data object containing all the parameters and other data of interest (stream and paz, for example)
- azimuthal_ppsds : bool, default=False
- Whether to generate PPSDs for azimuthal data
- verbose : bool, default=True
- Whether to print inputs and results to terminal
- **ppsd_kwargs : dict
- Dictionary with keyword arguments that are passed directly to obspy.signal.PPSD.
- If the following keywords are not specified, their defaults are amended in this function from the obspy defaults for its PPSD function. Specifically:
- - ppsd_length defaults to 60 (seconds) here instead of 3600
- - skip_on_gaps defaults to True instead of False
- - period_step_octaves defaults to 0.03125 instead of 0.125
-
- Returns
- -------
- ppsds : HVSRData object
- Dictionary containing entries with ppsds for each channel
- """
- #First, divide up for batch or not
- orig_args = locals().copy() #Get the initial arguments
- start_time = datetime.datetime.now()
-
- ppsd_kwargs_sprit_defaults = ppsd_kwargs.copy()
- #Set defaults here that are different than obspy defaults
- if 'ppsd_length' not in ppsd_kwargs.keys():
- ppsd_kwargs_sprit_defaults['ppsd_length'] = 30.0
- if 'skip_on_gaps' not in ppsd_kwargs.keys():
- ppsd_kwargs_sprit_defaults['skip_on_gaps'] = True
- if 'period_step_octaves' not in ppsd_kwargs.keys():
- ppsd_kwargs_sprit_defaults['period_step_octaves'] = 0.03125
- if 'period_limits' not in ppsd_kwargs.keys():
- ppsd_kwargs_sprit_defaults['period_limits'] = [1/40, 1/1]
-
- #Get Probablistic power spectral densities (PPSDs)
- #Get default args for function
- def get_default_args(func):
- signature = inspect.signature(func)
- return {
- k: v.default
- for k, v in signature.parameters.items()
- if v.default is not inspect.Parameter.empty
- }
-
- ppsd_kwargs = get_default_args(PPSD)
- ppsd_kwargs.update(ppsd_kwargs_sprit_defaults)#Update with sprit defaults, or user input
- orig_args['ppsd_kwargs'] = ppsd_kwargs
-
- # Update with processing parameters specified previously in input_params, if applicable
- if 'processing_parameters' in hvsr_data.keys():
- if 'generate_ppsds' in hvsr_data['processing_parameters'].keys():
- defaultVDict = dict(zip(inspect.getfullargspec(generate_ppsds).args[1:],
- inspect.getfullargspec(generate_ppsds).defaults))
- defaultVDict['ppsd_kwargs'] = ppsd_kwargs
- for k, v in hvsr_data['processing_parameters']['generate_ppsds'].items():
- # Manual input to function overrides the imported parameter values
- if not isinstance(v, (HVSRData, HVSRBatch)) and (k in orig_args.keys()) and (orig_args[k]==defaultVDict[k]):
- orig_args[k] = v
-
- azimuthal_ppsds = orig_args['azimuthal_ppsds']
- verbose = orig_args['verbose']
- ppsd_kwargs = orig_args['ppsd_kwargs']
-
- if (verbose and isinstance(hvsr_data, HVSRBatch)) or (verbose and not hvsr_data['batch']):
- if isinstance(hvsr_data, HVSRData) and hvsr_data['batch']:
- pass
- else:
- print('\nGenerating Probabilistic Power Spectral Densities (generate_ppsds())')
- print('\tUsing the following parameters:')
- for key, value in orig_args.items():
- if key=='hvsr_data':
- pass
- else:
- print('\t {}={}'.format(key, value))
- print()
-
- #Site is in the keys anytime it's not batch
- if isinstance(hvsr_data, HVSRBatch):
- #If running batch, we'll loop through each one
- for site_name in hvsr_data.keys():
- args = orig_args.copy() #Make a copy so we don't accidentally overwrite
- individual_params = hvsr_data[site_name] #Get what would normally be the "hvsr_data" variable for each site
- args['hvsr_data'] = individual_params #reset the hvsr_data parameter we originally read in to an individual site hvsr_data
- #args['hvsr_data']['batch'] = False #Set to false, since only running this time
- if hvsr_data[site_name]['ProcessingStatus']['OverallStatus']:
- try:
- hvsr_data[site_name] = _generate_ppsds_batch(**args) #Call another function, that lets us run this function again
- except:
- hvsr_data[site_name]['ProcessingStatus']['PPSDStatus']=False
- hvsr_data[site_name]['ProcessingStatus']['OverallStatus'] = False
- else:
- hvsr_data[site_name]['ProcessingStatus']['PPSDStatus']=False
- hvsr_data[site_name]['ProcessingStatus']['OverallStatus'] = False
-
- try:
- sprit_gui.update_progress_bars(prog_percent=5)
- except Exception as e:
- pass
- #print(e)
- return hvsr_data
- else:
- paz = hvsr_data['paz']
- stream = hvsr_data['stream']
-
- if azimuthal_ppsds:
- #get azimuthal ppsds (in an HVSRBatch object?)
- pass
- else:
- #Get ppsds of e component
- eStream = stream.select(component='E')
- estats = eStream.traces[0].stats
- ppsdE = PPSD(estats, paz['E'], **ppsd_kwargs)
- ppsdE.add(eStream)
-
- #Get ppsds of n component
- nStream = stream.select(component='N')
- nstats = nStream.traces[0].stats
- ppsdN = PPSD(nstats, paz['N'], **ppsd_kwargs)
- ppsdN.add(nStream)
-
- #Get ppsds of z component
- zStream = stream.select(component='Z')
- zstats = zStream.traces[0].stats
- ppsdZ = PPSD(zstats, paz['Z'], **ppsd_kwargs)
- ppsdZ.add(zStream)
-
- ppsds = {'Z':ppsdZ, 'N':ppsdN, 'E':ppsdE}
-
- #Add to the input dictionary, so that some items can be manipulated later on, and original can be saved
- hvsr_data['ppsds_obspy'] = ppsds
- hvsr_data['ppsds'] = {}
- anyKey = list(hvsr_data['ppsds_obspy'].keys())[0]
-
- #Get ppsd class members
- members = [mems for mems in dir(hvsr_data['ppsds_obspy'][anyKey]) if not callable(mems) and not mems.startswith("_")]
- hvsr_data['ppsds']['Z'] = {}
- hvsr_data['ppsds']['E'] = {}
- hvsr_data['ppsds']['N'] = {}
-
- #Get lists/arrays so we can manipulate data later and copy everything over to main 'ppsds' subdictionary (convert lists to np.arrays for consistency)
- listList = ['times_data', 'times_gaps', 'times_processed','current_times_used', 'psd_values'] #Things that need to be converted to np.array first, for consistency
- timeKeys= ['times_processed','current_times_used','psd_values']
- timeDiffWarn = True
- dfList = []
- time_data = {}
- time_dict = {}
- for m in members:
- hvsr_data['ppsds']['Z'][m] = getattr(hvsr_data['ppsds_obspy']['Z'], m)
- hvsr_data['ppsds']['E'][m] = getattr(hvsr_data['ppsds_obspy']['E'], m)
- hvsr_data['ppsds']['N'][m] = getattr(hvsr_data['ppsds_obspy']['N'], m)
- if m in listList:
-
- hvsr_data['ppsds']['Z'][m] = np.array(hvsr_data['ppsds']['Z'][m])
- hvsr_data['ppsds']['E'][m] = np.array(hvsr_data['ppsds']['E'][m])
- hvsr_data['ppsds']['N'][m] = np.array(hvsr_data['ppsds']['N'][m])
-
- if str(m)=='times_processed':
- unique_times = np.unique(np.array([hvsr_data['ppsds']['Z'][m],
- hvsr_data['ppsds']['E'][m],
- hvsr_data['ppsds']['N'][m]]))
-
- common_times = []
- for currTime in unique_times:
- if currTime in hvsr_data['ppsds']['Z'][m]:
- if currTime in hvsr_data['ppsds']['E'][m]:
- if currTime in hvsr_data['ppsds']['N'][m]:
- common_times.append(currTime)
-
- cTimeIndList = []
- for cTime in common_times:
- ZArr = hvsr_data['ppsds']['Z'][m]
- EArr = hvsr_data['ppsds']['E'][m]
- NArr = hvsr_data['ppsds']['N'][m]
-
- cTimeIndList.append([int(np.where(ZArr == cTime)[0][0]),
- int(np.where(EArr == cTime)[0][0]),
- int(np.where(NArr == cTime)[0][0])])
-
- #Make sure number of time windows is the same between PPSDs (this can happen with just a few slightly different number of samples)
- if m in timeKeys:
- if str(m) != 'times_processed':
- time_data[str(m)] = (hvsr_data['ppsds']['Z'][m], hvsr_data['ppsds']['E'][m], hvsr_data['ppsds']['N'][m])
-
- #print(m, hvsr_data['ppsds']['Z'][m])
-
- tSteps_same = hvsr_data['ppsds']['Z'][m].shape[0] == hvsr_data['ppsds']['E'][m].shape[0] == hvsr_data['ppsds']['N'][m].shape[0]
-
- if not tSteps_same:
- shortestTimeLength = min(hvsr_data['ppsds']['Z'][m].shape[0], hvsr_data['ppsds']['E'][m].shape[0], hvsr_data['ppsds']['N'][m].shape[0])
-
- maxPctDiff = 0
- for comp in hvsr_data['ppsds'].keys():
- currCompTimeLength = hvsr_data['ppsds'][comp][m].shape[0]
- timeLengthDiff = currCompTimeLength - shortestTimeLength
- percentageDiff = timeLengthDiff / currCompTimeLength
- if percentageDiff > maxPctDiff:
- maxPctDiff = percentageDiff
-
- for comp in hvsr_data['ppsds'].keys():
- while hvsr_data['ppsds'][comp][m].shape[0] > shortestTimeLength:
- hvsr_data['ppsds'][comp][m] = hvsr_data['ppsds'][comp][m][:-1]
-
-
- if maxPctDiff > 0.05 and timeDiffWarn:
- warnings.warn(f"\t Number of ppsd time windows between different components is significantly different: {round(maxPctDiff*100,2)}% > 5%. Last windows will be trimmed.")
- elif verbose and timeDiffWarn:
- print(f"\t Number of ppsd time windows between different components is different by {round(maxPctDiff*100,2)}%. Last window(s) of components with larger number of ppsd windows will be trimmed.")
- timeDiffWarn = False #So we only do this warning once, even though there are multiple arrays that need to be trimmed
-
- for i, currTStep in enumerate(cTimeIndList):
- colList = []
- currTStepList = []
- colList.append('TimesProcessed_Obspy')
- currTStepList.append(common_times[i])
- for tk in time_data.keys():
- colList.append(str(tk)+'_Z')
- colList.append(str(tk)+'_E')
- colList.append(str(tk)+'_N')
- currTStepList.append(time_data[tk][0][currTStep[0]])#z
- currTStepList.append(time_data[tk][1][currTStep[1]])#e
- currTStepList.append(time_data[tk][2][currTStep[2]])#n
-
- dfList.append(currTStepList)
-
- hvsrDF = pd.DataFrame(dfList, columns=colList)
- hvsrDF['TimesProcessed_ObspyEnd'] = hvsrDF['TimesProcessed_Obspy'] + ppsd_kwargs['ppsd_length']
-
- #Add other times (for start times)
- def convert_to_datetime(obspyUTCDateTime):
- return obspyUTCDateTime.datetime.replace(tzinfo=datetime.timezone.utc)
-
- def convert_to_mpl_dates(obspyUTCDateTime):
- return obspyUTCDateTime.matplotlib_date
-
- hvsrDF['TimesProcessed'] = hvsrDF['TimesProcessed_Obspy'].apply(convert_to_datetime)
- hvsrDF['TimesProcessed_End'] = hvsrDF['TimesProcessed'] + datetime.timedelta(days=0,seconds=ppsd_kwargs['ppsd_length'])
- hvsrDF['TimesProcessed_MPL'] = hvsrDF['TimesProcessed_Obspy'].apply(convert_to_mpl_dates)
- hvsrDF['TimesProcessed_MPLEnd'] = hvsrDF['TimesProcessed_MPL'] + (ppsd_kwargs['ppsd_length']/86400)
-
- hvsrDF['Use'] = True
- hvsrDF['Use']=hvsrDF['Use'].astype(bool)
- for gap in hvsr_data['ppsds']['Z']['times_gaps']:
- hvsrDF['Use'] = (hvsrDF['TimesProcessed_MPL'].gt(gap[1].matplotlib_date))| \
- (hvsrDF['TimesProcessed_MPLEnd'].lt(gap[0].matplotlib_date))# | \
-
- hvsrDF['Use'] = hvsrDF['Use'].astype(bool)
- if 'xwindows_out' in hvsr_data.keys():
- for window in hvsr_data['xwindows_out']:
- hvsrDF['Use'] = (hvsrDF['TimesProcessed_MPL'][hvsrDF['Use']].lt(window[0]) & hvsrDF['TimesProcessed_MPLEnd'][hvsrDF['Use']].lt(window[0]) )| \
- (hvsrDF['TimesProcessed_MPL'][hvsrDF['Use']].gt(window[1]) & hvsrDF['TimesProcessed_MPLEnd'][hvsrDF['Use']].gt(window[1]))
- hvsrDF['Use'] = hvsrDF['Use'].astype(bool)
-
- hvsrDF.set_index('TimesProcessed', inplace=True)
- hvsr_data['hvsr_df'] = hvsrDF
- #Create dict entry to keep track of how many outlier hvsr curves are removed (2-item list with [0]=current number, [1]=original number of curves)
- hvsr_data['tsteps_used'] = [hvsrDF['Use'].sum(), hvsrDF['Use'].shape[0]]
- #hvsr_data['tsteps_used'] = [hvsr_data['ppsds']['Z']['times_processed'].shape[0], hvsr_data['ppsds']['Z']['times_processed'].shape[0]]
-
- hvsr_data['tsteps_used'][0] = hvsr_data['ppsds']['Z']['current_times_used'].shape[0]
-
- hvsr_data = sprit_utils.make_it_classy(hvsr_data)
-
- if 'processing_parameters' not in hvsr_data.keys():
- hvsr_data['processing_parameters'] = {}
- hvsr_data['processing_parameters']['generate_ppsds'] = {}
- for key, value in orig_args.items():
- hvsr_data['processing_parameters']['generate_ppsds'][key] = value
-
- hvsr_data['ProcessingStatus']['PPSDStatus'] = True
- hvsr_data = _check_processing_status(hvsr_data, start_time=start_time, func_name=inspect.stack()[0][3], verbose=verbose)
- return hvsr_data
-
def get_char(in_char)
Outputs character with proper encoding/decoding
-
-
-Expand source code
-
-def get_char(in_char):
- """Outputs character with proper encoding/decoding"""
- if in_char in greek_chars.keys():
- out_char = greek_chars[in_char].encode(encoding='utf-8')
- else:
- out_char = in_char.encode(encoding='utf-8')
- return out_char.decode('utf-8')
-
def get_metadata(params, write_path='', update_metadata=True, source=None, **read_inventory_kwargs)
@@ -4562,511 +379,61 @@ Returns
params
: dict
Modified input dictionary with additional key:value pair containing paz dictionary (key = "paz")
-
-
-Expand source code
-
-def get_metadata(params, write_path='', update_metadata=True, source=None, **read_inventory_kwargs):
- """Get metadata and calculate or get paz parameter needed for PPSD
-
- Parameters
- ----------
- params : dict
- Dictionary containing all the input and other parameters needed for processing
- Ouput from input_params() function
- write_path : str
- String with output filepath of where to write updated inventory or metadata file
- If not specified, does not write file
- update_metadata : bool
- Whether to update the metadata file itself, or just read as-is. If using provided raspberry shake metadata file, select True.
- source : str, default=None
- This passes the source variable value to _read_RS_metadata. It is expected that this is passed directly from the source parameter of sprit.fetch_data()
-
- Returns
- -------
- params : dict
- Modified input dictionary with additional key:value pair containing paz dictionary (key = "paz")
- """
- invPath = params['metapath']
- raspShakeInstNameList = ['raspberry shake', 'shake', 'raspberry', 'rs', 'rs3d', 'rasp. shake', 'raspshake']
- trominoNameList = ['tromino', 'trom', 'trm', 't']
- if params['instrument'].lower() in raspShakeInstNameList:
- if update_metadata:
- params = _update_shake_metadata(filepath=invPath, params=params, write_path=write_path)
- params = _read_RS_Metadata(params, source=source)
- elif params['instrument'].lower() in trominoNameList:
- params['paz'] = {'Z':{}, 'E':{}, 'N':{}}
- #ALL THESE VALUES ARE PLACEHOLDERS, taken from RASPBERRY SHAKE! (Needed for PPSDs)
- params['paz']['Z'] = {'sensitivity': 360000000.0,
- 'gain': 360000000.0,
- 'poles': [(-1+0j), (-3.03+0j), (-3.03+0j), (-666.67+0j)],
- 'zeros': [0j, 0j, 0j]}
- params['paz']['E'] = params['paz']['Z']
- params['paz']['N'] = params['paz']['Z']
-
- channelObj_Z = obspy.core.inventory.channel.Channel(code='BHZ', location_code='00', latitude=params['params']['latitude'],
- longitude=params['params']['longitude'], elevation=params['params']['elevation'], depth=params['params']['depth'],
- azimuth=0, dip=90, types=None, external_references=None,
- sample_rate=None, sample_rate_ratio_number_samples=None, sample_rate_ratio_number_seconds=None,
- storage_format=None, clock_drift_in_seconds_per_sample=None, calibration_units=None,
- calibration_units_description=None, sensor=None, pre_amplifier=None, data_logger=None,
- equipments=None, response=None, description=None, comments=None, start_date=None, end_date=None,
- restricted_status=None, alternate_code=None, historical_code=None, data_availability=None,
- identifiers=None, water_level=None, source_id=None)
- channelObj_E = obspy.core.inventory.channel.Channel(code='BHE', location_code='00', latitude=params['params']['latitude'],
- longitude=params['params']['longitude'], elevation=params['params']['elevation'], depth=params['params']['depth'],
- azimuth=90, dip=0)
-
- channelObj_N = obspy.core.inventory.channel.Channel(code='BHN', location_code='00', latitude=params['params']['latitude'],
- longitude=params['params']['longitude'], elevation=params['params']['elevation'], depth=params['params']['depth'],
- azimuth=0, dip=0)
-
- siteObj = obspy.core.inventory.util.Site(name=params['params']['site'], description=None, town=None, county=None, region=None, country=None)
- stationObj = obspy.core.inventory.station.Station(code='TZ', latitude=params['params']['latitude'], longitude=params['params']['longitude'],
- elevation=params['params']['elevation'], channels=[channelObj_Z, channelObj_E, channelObj_N], site=siteObj,
- vault=None, geology=None, equipments=None, operators=None, creation_date=datetime.datetime.today(),
- termination_date=None, total_number_of_channels=None,
- selected_number_of_channels=None, description='Estimated data for Tromino, this is NOT from the manufacturer',
- comments=None, start_date=None,
- end_date=None, restricted_status=None, alternate_code=None, historical_code=None,
- data_availability=None, identifiers=None, water_level=None, source_id=None)
-
- network = [obspy.core.inventory.network.Network(code='TROM', stations=[stationObj], total_number_of_stations=None,
- selected_number_of_stations=None, description=None, comments=None, start_date=None,
- end_date=None, restricted_status=None, alternate_code=None, historical_code=None,
- data_availability=None, identifiers=None, operators=None, source_id=None)]
-
- params['inv'] = obspy.Inventory(networks=network)
- else:
- if not invPath:
- pass #if invPath is None
- elif not pathlib.Path(invPath).exists() or invPath=='':
- warnings.warn(f"The metapath parameter was not specified correctly. Returning original params value {params['metapath']}")
- readInvKwargs = {}
- argspecs = inspect.getfullargspec(obspy.read_inventory)
- for argName in argspecs[0]:
- if argName in read_inventory_kwargs.keys():
- readInvKwargs[argName] = read_inventory_kwargs[argName]
-
- readInvKwargs['path_or_file_object'] = invPath
- params['inv'] = obspy.read_inventory(invPath)
- if 'params' in params.keys():
- params['params']['inv'] = params['inv']
-
- return params
-
-def get_report(hvsr_results, report_format='print', plot_type='HVSR p ann C+ p ann Spec', export_path=None, csv_overwrite_opt='append', no_output=False, verbose=False)
+def get_report(hvsr_results, report_formats=['print', 'table', 'plot', 'html', 'pdf'], azimuth='HV', plot_type='HVSR p ann C+ p ann Spec p ann', plot_engine='matplotlib', show_print_report=True, show_table_report=False, show_plot_report=True, show_html_report=False, show_pdf_report=True, suppress_report_outputs=False, show_report_outputs=False, csv_handling='append', report_export_format=None, report_export_path=None, verbose=False, **kwargs)
-Get a report of the HVSR analysis in a variety of formats.
+Generate and/or print and/or export a report of the HVSR analysis in a variety of formats.
+Formats include:
+* 'print': A (monospace) text summary of the HVSR results
+* 'table': A pandas.DataFrame summary of the HVSR Results.
+This is useful for copy/pasting directly into a larger worksheet.
+* 'plot': A plot summary of the HVSR results, generated using the plot_hvsr() function.
+* 'html': An HTML document/text of the HVSR results. This includes the table, print, and plot reports in one document.
+* 'pdf': A PDF document showing the summary of the HVSR Results.
+The PDF report is simply the HTML report saved to an A4-sized PDF document.
Parameters
hvsr_results
: dict
- Dictionary containing all the information about the processed hvsr data
-report_format
: {'csv', 'print', plot}
+report_formats
: {'table', 'print', plot}
- Format in which to print or export the report.
The following report_formats return the following items in the following attributes:
-- 'plot': hvsr_results['Print_Report'] as a str str
+- 'plot': hvsr_results['Print_Report'] as a str
- 'print': hvsr_results['HV_Plot'] - matplotlib.Figure object
-- 'csv':
-hvsr_results['CSV_Report']- pandas.DataFrame object
-- list/tuple - a list or tuple of the above objects, in the same order they are in the report_format list
+- 'table':
+hvsr_results['Table_Report']- pandas.DataFrame object
+- list/tuple - a list or tuple of the above objects, in the same order they are in the report_formats list
+- 'html': hvsr_results['HTML_Report'] - a string containing the text for an HTML document
+- 'pdf': currently does not save to the HVSRData object itself, can only be saved to the disk directly
plot_type
: str
, default = 'HVSR p ann C+ p ann Spec
-- What type of plot to plot, if 'plot' part of report_format input
-export_path
: None, bool,
or filepath
, default = None
-- If None or False, does not export; if True, will export to same directory as the datapath parameter in the input_params() function.
+
- What type of plot to plot, if 'plot' part of report_formats input
+ azimuth
: str
, default = 'HV'
+- Which azimuth to plot, by default "HV" which is the main "azimuth" combining the E and N components
+csv_handling
: str, {'append', 'overwrite', 'keep/rename'}
+- How to handle table report outputs if the designated csv output file already exists. By default, appends the new information to the end of the existing file.
+suppress_report_outputs
: bool
, default=False
+- If True, only reads output to appropriate attribute of data class (ie, print does not print, only reads text into variable). If False, performs as normal.
+report_export_format
: list
or str
, default=['pdf']
+- A string or list of strings indicating which report formats should be exported to disk.
+report_export_path
: None, bool,
or filepath
, default = None
+- If None or False, does not export; if True, will export to same directory as the input_data parameter in the input_params() function.
Otherwise, it should be a string or path object indicating where to export results. May be a file or directory.
If a directory is specified, the filename will be
-"
-". The suffix defaults to png for report_format="plot", csv for 'csv', and does not export if 'print.'
-csv_overwrite_opts
: str, {'append', 'overwrite', 'keep/rename'}
-- How to handle csv report outputs if the designated csv output file already exists. By default, appends the new information to the end of the existing file.
-no_output
: bool
, default=False
-- If True, only reads output to appropriate attribute of data class (ie, print does not print, only reads text into variable). If False, performs as normal.
+"-".
+The extension/suffix defaults to png for report_formats="plot", csv for 'table', txt for 'print', html for 'html', and pdf for 'pdf.'
verbose
: bool
, default=True
-- Whether to print the results to terminal. This is the same output as report_format='print', and will not repeat if that is already selected
+- Whether to print the results to terminal. This is the same output as report_formats='print', and will not repeat if that is already selected
Returns
-
-
-Expand source code
-
-def get_report(hvsr_results, report_format='print', plot_type='HVSR p ann C+ p ann Spec', export_path=None, csv_overwrite_opt='append', no_output=False, verbose=False):
- """Get a report of the HVSR analysis in a variety of formats.
-
- Parameters
- ----------
- hvsr_results : dict
- Dictionary containing all the information about the processed hvsr data
- report_format : {'csv', 'print', plot}
- Format in which to print or export the report.
- The following report_formats return the following items in the following attributes:
- - 'plot': hvsr_results['Print_Report'] as a str str
- - 'print': hvsr_results['HV_Plot'] - matplotlib.Figure object
- - 'csv': hvsr_results['CSV_Report']- pandas.DataFrame object
- - list/tuple - a list or tuple of the above objects, in the same order they are in the report_format list
- plot_type : str, default = 'HVSR p ann C+ p ann Spec
- What type of plot to plot, if 'plot' part of report_format input
- export_path : None, bool, or filepath, default = None
- If None or False, does not export; if True, will export to same directory as the datapath parameter in the input_params() function.
- Otherwise, it should be a string or path object indicating where to export results. May be a file or directory.
- If a directory is specified, the filename will be "<site_name>_<acq_date>_<UTC start time>-<UTC end time>". The suffix defaults to png for report_format="plot", csv for 'csv', and does not export if 'print.'
- csv_overwrite_opts : str, {'append', 'overwrite', 'keep/rename'}
- How to handle csv report outputs if the designated csv output file already exists. By default, appends the new information to the end of the existing file.
- no_output : bool, default=False
- If True, only reads output to appropriate attribute of data class (ie, print does not print, only reads text into variable). If False, performs as normal.
- verbose : bool, default=True
- Whether to print the results to terminal. This is the same output as report_format='print', and will not repeat if that is already selected
-
- Returns
- -------
- sprit.HVSRData
- """
- orig_args = locals().copy() #Get the initial arguments
-
- # Update with processing parameters specified previously in input_params, if applicable
- if 'processing_parameters' in hvsr_results.keys():
- if 'get_report' in hvsr_results['processing_parameters'].keys():
- for k, v in hvsr_results['processing_parameters']['get_report'].items():
- defaultVDict = dict(zip(inspect.getfullargspec(get_report).args[1:],
- inspect.getfullargspec(get_report).defaults))
- # Manual input to function overrides the imported parameter values
- if (not isinstance(v, (HVSRData, HVSRBatch))) and (k in orig_args.keys()) and (orig_args[k]==defaultVDict[k]):
- orig_args[k] = v
-
- report_format = orig_args['report_format']
- plot_type = orig_args['plot_type']
- export_path = orig_args['export_path']
- csv_overwrite_opt = orig_args['csv_overwrite_opt']
- no_output = orig_args['no_output']
- verbose = orig_args['verbose']
-
- if (verbose and isinstance(hvsr_results, HVSRBatch)) or (verbose and not hvsr_results['batch']):
- if isinstance(hvsr_results, HVSRData) and hvsr_results['batch']:
- pass
- else:
- print('\nGetting HVSR Report: get_report()')
- print('\tUsing the following parameters:')
- for key, value in orig_args.items():
- if key=='params':
- pass
- else:
- print('\t {}={}'.format(key, value))
- print()
-
- if isinstance(hvsr_results, HVSRBatch):
- if verbose:
- print('\nGetting Reports: Running in batch mode')
-
- print('\tUsing parameters:')
- for key, value in orig_args.items():
- print(f'\t {key}={value}')
- print()
- #If running batch, we'll loop through each site
- for site_name in hvsr_results.keys():
- args = orig_args.copy() #Make a copy so we don't accidentally overwrite
- individual_params = hvsr_results[site_name] #Get what would normally be the "params" variable for each site
- args['hvsr_results'] = individual_params #reset the params parameter we originally read in to an individual site params
- if hvsr_results[site_name]['ProcessingStatus']['OverallStatus']:
- try:
- hvsr_results[site_name] = _get_report_batch(**args) #Call another function, that lets us run this function again
- except:
- hvsr_results[site_name] = hvsr_results[site_name]
- else:
- hvsr_results[site_name] = hvsr_results[site_name]
-
- combined_csvReport = pd.DataFrame()
- for site_name in hvsr_results.keys():
- if 'CSV_Report' in hvsr_results[site_name].keys():
- combined_csvReport = pd.concat([combined_csvReport, hvsr_results[site_name]['CSV_Report']], ignore_index=True, join='inner')
-
- if export_path is not None:
- if export_path is True:
- if pathlib.Path(hvsr_results['input_params']['datapath']) in sampleFileKeyMap.values():
- csvExportPath = pathlib.Path(os.getcwd())
- else:
- csvExportPath = pathlib.Path(hvsr_results['input_params']['datapath'])
- elif pathlib.Path(export_path).is_dir():
- csvExportPath = export_path
- elif pathlib.Path(export_path).is_file():
- csvExportPath = export_path.parent
- else:
- csvExportPath = pathlib.Path(hvsr_results[site_name].datapath)
- if csvExportPath.is_dir():
- pass
- else:
- csvExportPath = csvExportPath.parent
-
- combined_csvReport.to_csv(csvExportPath, index=False)
-
- else:
- #if 'BestPeak' in hvsr_results.keys() and 'PassList' in hvsr_results['BestPeak'].keys():
- try:
- curvTestsPassed = (hvsr_results['BestPeak']['PassList']['WindowLengthFreq.'] +
- hvsr_results['BestPeak']['PassList']['SignificantCycles']+
- hvsr_results['BestPeak']['PassList']['LowCurveStDevOverTime'])
- curvePass = curvTestsPassed > 2
-
- #Peak Pass?
- peakTestsPassed = ( hvsr_results['BestPeak']['PassList']['PeakProminenceBelow'] +
- hvsr_results['BestPeak']['PassList']['PeakProminenceAbove']+
- hvsr_results['BestPeak']['PassList']['PeakAmpClarity']+
- hvsr_results['BestPeak']['PassList']['FreqStability']+
- hvsr_results['BestPeak']['PassList']['PeakStability_FreqStD']+
- hvsr_results['BestPeak']['PassList']['PeakStability_AmpStD'])
- peakPass = peakTestsPassed >= 5
- except Exception as e:
- errMsg= 'No BestPeak identified. Check peak_freq_range or hvsr_band or try to remove bad noise windows using remove_noise() or change processing parameters in process_hvsr() or generate_ppsds(). Otherwise, data may not be usable for HVSR.'
- print(errMsg)
- print(e)
- return hvsr_results
- #raise RuntimeError('No BestPeak identified. Check peak_freq_range or hvsr_band or try to remove bad noise windows using remove_noise() or change processing parameters in process_hvsr() or generate_ppsds(). Otherwise, data may not be usable for HVSR.')
-
- if isinstance(report_format, (list, tuple)):
- pass
- else:
- #We will use a loop later even if it's just one report type, so reformat to prepare for for loop
- allList = [':', 'all']
- if report_format.lower() in allList:
- report_format = ['print', 'csv', 'plot']
- else:
- report_format = [report_format]
-
- def export_report(export_obj, _export_path, _rep_form):
- if _export_path is None:
- return
- else:
- if _rep_form == 'csv':
- ext = '.csv'
- elif _rep_form =='plot':
- ext='.png'
- else:
- ext=''
-
- sitename=hvsr_results['input_params']['site']#.replace('.', '-')
- fname = f"{sitename}_{hvsr_results['input_params']['acq_date']}_{str(hvsr_results['input_params']['starttime'].time)[:5]}-{str(hvsr_results['input_params']['endtime'].time)[:5]}{ext}"
- fname = fname.replace(':', '')
-
- if _export_path==True:
- #Check so we don't write in sample directory
- if pathlib.Path(hvsr_results['input_params']['datapath']) in sampleFileKeyMap.values():
- if pathlib.Path(os.getcwd()) in sampleFileKeyMap.values(): #Just in case current working directory is also sample directory
- inFile = pathlib.Path.home() #Use the path to user's home if all else fails
- else:
- inFile = pathlib.Path(os.getcwd())
- else:
- inFile = pathlib.Path(hvsr_results['input_params']['datapath'])
-
- if inFile.is_dir():
- outFile = inFile.joinpath(fname)
- else:
- outFile = inFile.with_name(fname)
- else:
- if pathlib.Path(_export_path).is_dir():
- outFile = pathlib.Path(_export_path).joinpath(fname)
- else:
- outFile=pathlib.Path(_export_path)
-
- if _rep_form == 'csv':
- if outFile.exists():
- existFile = pd.read_csv(outFile)
- if csv_overwrite_opt.lower() == 'append':
- export_obj = pd.concat([existFile, export_obj], ignore_index=True, join='inner')
- elif csv_overwrite_opt.lower() == 'overwrite':
- pass
- else:# csv_overwrite_opt.lower() in ['keep', 'rename']:
- fileNameExists = True
- i=1
- while fileNameExists:
- outFile = outFile.with_stem(f"{outFile.stem}_{i}")
- i+=1
- if not outFile.exists():
- fileNameExists = False
- try:
- print(f'\nSaving csv data to: {outFile}')
- export_obj.to_csv(outFile, index_label='ID')
- except:
- warnings.warn("Report not exported. \n\tDataframe to be exported as csv has been saved in hvsr_results['BestPeak']['Report']['CSV_Report]", category=RuntimeWarning)
- elif _rep_form =='plot':
- if verbose:
- print(f'\nSaving plot to: {outFile}')
- plt.scf = export_obj
- plt.savefig(outFile)
- return
-
- def report_output(_report_format, _plot_type='HVSR p ann C+ p ann Spec', _export_path=None, _no_output=False, verbose=False):
- if _report_format=='print':
- #Print results
-
- #Make separators for nicely formatted print output
- sepLen = 99
- siteSepSymbol = '='
- intSepSymbol = u"\u2013"
- extSepSymbol = u"\u2014"
-
- if sepLen % 2 == 0:
- remainVal = 1
- else:
- remainVal = 0
-
- siteWhitespace = 2
- #Format the separator lines internal to each site
- internalSeparator = intSepSymbol.center(sepLen-4, intSepSymbol).center(sepLen, ' ')
-
- extSiteSeparator = "".center(sepLen, extSepSymbol)
- siteSeparator = f"{hvsr_results['input_params']['site']}".center(sepLen - siteWhitespace, ' ').center(sepLen, siteSepSymbol)
- endSiteSeparator = "".center(sepLen, siteSepSymbol)
-
- #Start building list to print
- report_string_list = []
- report_string_list.append("") #Blank line to start
- report_string_list.append(extSiteSeparator)
- report_string_list.append(siteSeparator)
- report_string_list.append(extSiteSeparator)
- #report_string_list.append(internalSeparator)
- report_string_list.append('')
- report_string_list.append(f"\tSite Name: {hvsr_results['input_params']['site']}")
- report_string_list.append(f"\tAcq. Date: {hvsr_results['input_params']['acq_date']}")
- report_string_list.append(f"\tLocation : {hvsr_results['input_params']['longitude']}, {hvsr_results['input_params']['latitude']}")
- report_string_list.append(f"\tElevation: {hvsr_results['input_params']['elevation']}")
- report_string_list.append('')
- report_string_list.append(internalSeparator)
- report_string_list.append('')
- if 'BestPeak' not in hvsr_results.keys():
- report_string_list.append('\tNo identifiable BestPeak was present between {} for {}'.format(hvsr_results['input_params']['hvsr_band'], hvsr_results['input_params']['site']))
- else:
- report_string_list.append('\t{0:.3f} Hz Peak Frequency'.format(hvsr_results['BestPeak']['f0']))
- if curvePass and peakPass:
- report_string_list.append('\t {} Curve at {} Hz passed quality checks! ☺ :D'.format(sprit_utils.check_mark(), round(hvsr_results['BestPeak']['f0'],3)))
- else:
- report_string_list.append('\t {} Peak at {} Hz did NOT pass quality checks ☹:('.format(sprit_utils.x_mark(), round(hvsr_results['BestPeak']['f0'],3)))
- report_string_list.append('')
- report_string_list.append(internalSeparator)
- report_string_list.append('')
-
- justSize=34
- #Print individual results
- report_string_list.append('\tCurve Tests: {}/3 passed (3/3 needed)'.format(curvTestsPassed))
- report_string_list.append(f"\t\t {hvsr_results['BestPeak']['Report']['Lw'][-1]}"+" Length of processing windows".ljust(justSize)+f"{hvsr_results['BestPeak']['Report']['Lw']}")
- report_string_list.append(f"\t\t {hvsr_results['BestPeak']['Report']['Nc'][-1]}"+" Number of significant cycles".ljust(justSize)+f"{hvsr_results['BestPeak']['Report']['Nc']}")
- report_string_list.append(f"\t\t {hvsr_results['BestPeak']['Report']['σ_A(f)'][-1]}"+" Small H/V StDev over time".ljust(justSize)+f"{hvsr_results['BestPeak']['Report']['σ_A(f)']}")
-
- report_string_list.append('')
- report_string_list.append("\tPeak Tests: {}/6 passed (5/6 needed)".format(peakTestsPassed))
- report_string_list.append(f"\t\t {hvsr_results['BestPeak']['Report']['A(f-)'][-1]}"+" Peak is prominent below".ljust(justSize)+f"{hvsr_results['BestPeak']['Report']['A(f-)']}")
- report_string_list.append(f"\t\t {hvsr_results['BestPeak']['Report']['A(f+)'][-1]}"+" Peak is prominent above".ljust(justSize)+f"{hvsr_results['BestPeak']['Report']['A(f+)']}")
- report_string_list.append(f"\t\t {hvsr_results['BestPeak']['Report']['A0'][-1]}"+" Peak is large".ljust(justSize)+f"{hvsr_results['BestPeak']['Report']['A0']}")
- if hvsr_results['BestPeak']['PassList']['FreqStability']:
- res = sprit_utils.check_mark()
- else:
- res = sprit_utils.x_mark()
- report_string_list.append(f"\t\t {res}"+ " Peak freq. is stable over time".ljust(justSize)+ f"{hvsr_results['BestPeak']['Report']['P-'][:5]} and {hvsr_results['BestPeak']['Report']['P+'][:-1]} {res}")
- report_string_list.append(f"\t\t {hvsr_results['BestPeak']['Report']['Sf'][-1]}"+" Stability of peak (Freq. StDev)".ljust(justSize)+f"{hvsr_results['BestPeak']['Report']['Sf']}")
- report_string_list.append(f"\t\t {hvsr_results['BestPeak']['Report']['Sa'][-1]}"+" Stability of peak (Amp. StDev)".ljust(justSize)+f"{hvsr_results['BestPeak']['Report']['Sa']}")
- report_string_list.append('')
- report_string_list.append(f"Calculated using {hvsr_results['hvsr_df']['Use'].sum()}/{hvsr_results['hvsr_df']['Use'].count()} time windows".rjust(sepLen-1))
- report_string_list.append(extSiteSeparator)
- #report_string_list.append(endSiteSeparator)
- #report_string_list.append(extSiteSeparator)
- report_string_list.append('')
-
- reportStr=''
- #Now print it
- for line in report_string_list:
- reportStr = reportStr+'\n'+line
-
- if not _no_output:
- print(reportStr)
-
- export_report(export_obj=reportStr, _export_path=_export_path, _rep_form=_report_format)
- hvsr_results['BestPeak']['Report']['Print_Report'] = reportStr
- hvsr_results['Print_Report'] = reportStr
-
- elif _report_format=='csv':
- import pandas as pd
- pdCols = ['Site Name', 'Acq_Date', 'Longitude', 'Latitide', 'Elevation', 'PeakFrequency',
- 'WindowLengthFreq.','SignificantCycles','LowCurveStDevOverTime',
- 'PeakProminenceBelow','PeakProminenceAbove','PeakAmpClarity','FreqStability', 'PeakStability_FreqStD','PeakStability_AmpStD', 'PeakPasses']
- d = hvsr_results
- criteriaList = []
- for p in hvsr_results['BestPeak']["PassList"]:
- criteriaList.append(hvsr_results['BestPeak']["PassList"][p])
- criteriaList.append(hvsr_results['BestPeak']["PeakPasses"])
- dfList = [[d['input_params']['site'], d['input_params']['acq_date'], d['input_params']['longitude'], d['input_params']['latitude'], d['input_params']['elevation'], round(d['BestPeak']['f0'], 3)]]
- dfList[0].extend(criteriaList)
- outDF = pd.DataFrame(dfList, columns=pdCols)
-
- if verbose:
- print('\nCSV Report:\n')
- maxColWidth = 13
- print(' ', end='')
- for col in outDF.columns:
- if len(str(col)) > maxColWidth:
- colStr = str(col)[:maxColWidth-3]+'...'
- else:
- colStr = str(col)
- print(colStr.ljust(maxColWidth), end=' ')
- print() #new line
- for c in range(len(outDF.columns) * (maxColWidth+2)):
- if c % (maxColWidth+2) == 0:
- print('|', end='')
- else:
- print('-', end='')
- print('|') #new line
- print(' ', end='') #Small indent at start
- for row in outDF.iterrows():
- for col in row[1]:
- if len(str(col)) > maxColWidth:
- colStr = str(col)[:maxColWidth-3]+'...'
- else:
- colStr = str(col)
- print(colStr.ljust(maxColWidth), end=' ')
- print()
-
- try:
- export_report(export_obj=outDF, _export_path=_export_path, _rep_form=_report_format)
- except:
- print("Error in exporting csv report. CSV not exported")
- hvsr_results['BestPeak']['Report']['CSV_Report'] = outDF
- hvsr_results['CSV_Report'] = outDF
-
- elif _report_format=='plot':
- fig_ax = plot_hvsr(hvsr_results, plot_type=_plot_type, show=False, return_fig=True)
-
- export_report(export_obj=fig_ax[0], _export_path=_export_path, _rep_form=_report_format)
- hvsr_results['BestPeak']['Report']['HV_Plot']=hvsr_results['HV_Plot']=fig_ax
-
- print('\nPlot of data report:')
- plt.show()
-
- return hvsr_results
-
- for i, rep_form in enumerate(report_format):
- if isinstance(export_path, (list, tuple)):
- if not isinstance(report_format, (list, tuple)):
- warnings.warn('export_path is a list/tuple and report_format is not. This may result in unexpected behavior.')
- if isinstance(report_format, (list, tuple)) and isinstance(export_path, (list, tuple)) and len(report_format) != len(export_path):
- warnings.warn('export_path and report_format are both lists or tuples, but they are not the same length. This may result in unexpected behavior.')
-
- exp_path = export_path[i]
- else:
- exp_path = export_path
- hvsr_results = report_output(_report_format=rep_form, _plot_type=plot_type, _export_path=exp_path, _no_output=no_output, verbose=verbose)
-
- hvsr_results['processing_parameters']['get_report'] = {}
- for key, value in orig_args.items():
- hvsr_results['processing_parameters']['get_report'][key] = value
-
- return hvsr_results
-
-def gui(kind='default')
+def gui(kind='browser')
Function to open a graphical user interface (gui)
@@ -5077,90 +444,12 @@ Parameters
"widget" opens jupyter widget'
"lite" open lite (pending update), by default 'default'
-
-
-Expand source code
-
-def gui(kind='default'):
- """Function to open a graphical user interface (gui)
-
- Parameters
- ----------
- kind : str, optional
- What type of gui to open. "default" opens regular windowed interface,
- "widget" opens jupyter widget'
- "lite" open lite (pending update), by default 'default'
-
- """
- defaultList = ['windowed', 'window', 'default', 'd']
- widgetList = ['widget', 'jupyter', 'notebook', 'w', 'nb']
- liteList = ['lite', 'light', 'basic', 'l', 'b']
-
- if kind.lower() in defaultList:
- import pkg_resources
- #guiPath = pathlib.Path(os.path.realpath(__file__))
- try:
- from sprit.sprit_gui import SPRIT_App
- except:
- from sprit_gui import SPRIT_App
-
- try:
- import tkinter as tk
- except:
- if sys.platform == 'linux':
- raise ImportError('The SpRIT graphical interface uses tkinter, which ships with python but is not pre-installed on linux machines. Use "apt-get install python-tk" or "apt-get install python3-tk" to install tkinter. You may need to use the sudo command at the start of those commands.')
-
- def on_gui_closing():
- plt.close('all')
- gui_root.quit()
- gui_root.destroy()
-
- if sys.platform == 'linux':
- if not pathlib.Path("/usr/share/doc/python3-tk").exists():
- warnings.warn('The SpRIT graphical interface uses tkinter, which ships with python but is not pre-installed on linux machines. Use "apt-get install python-tk" or "apt-get install python3-tk" to install tkinter. You may need to use the sudo command at the start of those commands.')
-
- gui_root = tk.Tk()
- try:
- try:
- icon_path =pathlib.Path(pkg_resources.resource_filename(__name__, 'resources/icon/sprit_icon_alpha.ico'))
- gui_root.iconbitmap(icon_path)
- except:
- icon_path = pathlib.Path(pkg_resources.resource_filename(__name__, 'resources/icon/sprit_icon.png'))
- gui_root.iconphoto(False, tk.PhotoImage(file=icon_path.as_posix()))
- except Exception as e:
- print("ICON NOT LOADED, still opening GUI")
-
- gui_root.resizable(True, True)
- spritApp = SPRIT_App(master=gui_root) #Open the app with a tk.Tk root
-
- gui_root.protocol("WM_DELETE_WINDOW", on_gui_closing)
- gui_root.mainloop() #Run the main loop
- elif kind.lower() in widgetList:
- try:
- sprit_jupyter_UI.create_jupyter_ui()
- except Exception as e:
- print(e)
-
def has_required_channels(stream)
-
-
-Expand source code
-
-def has_required_channels(stream):
- channel_set = set()
-
- # Extract the channel codes from the traces in the stream
- for trace in stream:
- channel_set.add(trace.stats.channel)
-
- # Check if Z, E, and N channels are present
- return {'Z', 'E', 'N'}.issubset(channel_set)
-
def import_data(import_filepath, data_format='pickle')
@@ -5179,74 +468,29 @@ Returns
HVSRData
or HVSRBatch object
-
-
-Expand source code
-
-def import_data(import_filepath, data_format='pickle'):
- """Function to import .hvsr (or other extension) data exported using export_data() function
-
- Parameters
- ----------
- import_filepath : str or path object
- Filepath of file created using export_data() function. This is usually a pickle file with a .hvsr extension
- data_format : str, default='pickle'
- Type of format data is in. Currently, only 'pickle' supported. Eventually, json or other type may be supported, by default 'pickle'.
-
- Returns
- -------
- HVSRData or HVSRBatch object
- """
- if data_format=='pickle':
- with open(import_filepath, 'rb') as f:
- dataIN = pickle.load(f)
- else:
- dataIN = import_filepath
- return dataIN
-
-
-def import_settings(settings_import_path, settings_import_type='instrument', verbose=False)
-
-
-
-
-
-Expand source code
-
-def import_settings(settings_import_path, settings_import_type='instrument', verbose=False):
-
- allList = ['all', ':', 'both', 'any']
- if settings_import_type.lower() not in allList:
- # if just a single settings dict is desired
- with open(settings_import_path, 'r') as f:
- settingsDict = json.load(f)
- else:
- # Either a directory or list
- if isinstance(settings_import_path, (list, tuple)):
- for setPath in settings_import_path:
- pass
- else:
- settings_import_path = sprit_utils.checkifpath(settings_import_path)
- if not settings_import_path.is_dir():
- raise RuntimeError(f'settings_import_type={settings_import_type}, but settings_import_path is not list/tuple or filepath to directory')
- else:
- instFile = settings_import_path.glob('*.inst')
- procFile = settings_import_path.glob('*.proc')
- return settingsDict
-
+
+def import_settings(settings_import_path, settings_import_type='instrument', verbose=False)
+
+
+
-def input_params(datapath, site='HVSR Site', network='AM', station='RAC84', loc='00', channels=['EHZ', 'EHN', 'EHE'], acq_date='2024-02-19', starttime='00:00:00.00', endtime='23:59:59.999999', tzone='UTC', xcoord=-88.2290526, ycoord=40.1012122, elevation=755, input_crs='EPSG:4326', output_crs='EPSG:4326', elev_unit='feet', depth=0, instrument='Raspberry Shake', metapath=None, hvsr_band=[1, 40], peak_freq_range=[1, 40], processing_parameters={}, verbose=False)
+def input_params(input_data, site='HVSR Site', id_prefix=None, network='AM', station='RAC84', loc='00', channels=['EHZ', 'EHN', 'EHE'], acq_date='2024-10-30', starttime=UTCDateTime(2024, 10, 30, 0, 0), endtime=UTCDateTime(2024, 10, 30, 23, 59, 59, 999999), tzone='UTC', xcoord=-88.2290526, ycoord=40.1012122, elevation=755, input_crs=None, output_crs=None, elev_unit='meters', depth=0, instrument='Raspberry Shake', metapath=None, hvsr_band=[0.4, 40], peak_freq_range=[0.4, 40], processing_parameters={}, verbose=False)
Function for designating input parameters for reading in and processing data
Parameters
-datapath
: str
or pathlib.Path object
+input_data
: str
or pathlib.Path object
- Filepath of data. This can be a directory or file, but will need to match with what is chosen later as the source parameter in fetch_data()
site
: str
, default="HVSR Site"
- Site name as designated by user for ease of reference. Used for plotting titles, filenames, etc.
+id_prefix
: str
, default=None
+- A prefix that may be used to create unique identifiers for each site.
+The identifier created is saved as the ['HVSR_ID'] attribute of the HVSRData object,
+and is equivalent to the following formatted string:
+f"{id_prefix}-{acq_date.strftime("%Y%m%d")}-{starttime.strftime("%H%M")}-{station}".
network
: str
, default='AM'
- The network designation of the seismometer. This is necessary for data from Raspberry Shakes. 'AM' is for Amateur network, which fits Raspberry Shakes.
station
: str
, default='RAC84'
@@ -5268,24 +512,24 @@ Parameters
If int, should be the int value of the UTC offset (e.g., for American Eastern Standard Time: -5).
This is necessary for Raspberry Shake data in 'raw' format.
xcoord
: float
, default=-88.2290526
-- Longitude (or easting, or, generally, X coordinate) of data point, in Coordinate Reference System (CRS) designated by input_crs. Currently only used in csv output, but will likely be used in future for mapping/profile purposes.
+- Longitude (or easting, or, generally, X coordinate) of data point, in Coordinate Reference System (CRS) designated by input_crs. Currently only used in table output, but will likely be used in future for mapping/profile purposes.
ycoord
: float
, default=40.1012122
-- Latitute (or northing, or, generally, X coordinate) of data point, in Coordinate Reference System (CRS) designated by input_crs. Currently only used in csv output, but will likely be used in future for mapping/profile purposes.
+- Latitute (or northing, or, generally, X coordinate) of data point, in Coordinate Reference System (CRS) designated by input_crs. Currently only used in table output, but will likely be used in future for mapping/profile purposes.
input_crs
: str
or other format read by pyproj
, default='EPSG:4326'
- Coordinate reference system of input data, as used by pyproj.CRS.from_user_input()
output_crs
: str
or other format read by pyproj
, default='EPSG:4326'
- Coordinate reference system to which input data will be transformed, as used by pyproj.CRS.from_user_input()
elevation
: float
, default=755
-- Surface elevation of data point. Not currently used (except in csv output), but will likely be used in the future.
+- Surface elevation of data point. Not currently used (except in table output), but will likely be used in the future.
depth
: float
, default=0
- Depth of seismometer. Not currently used, but will likely be used in the future.
-instrument
: str
or list {'Raspberry Shake')
+instrument
: str {'Raspberry Shake', "Tromino"}
- Instrument from which the data was acquired.
metapath
: str
or pathlib.Path object
, default=None
- Filepath of metadata, in format supported by obspy.read_inventory. If default value of None, will read from resources folder of repository (only supported for Raspberry Shake).
-hvsr_band
: list
, default=[1, 40]
+hvsr_band
: list
, default=[0.4, 40]
- Two-element list containing low and high "corner" frequencies (in Hz) for processing. This can specified again later.
-peak_freq_range
: list
or tuple
, default=[1, 40]
+peak_freq_range
: list
or tuple
, default=[0.4, 40]
- Two-element list or tuple containing low and high frequencies (in Hz) that are used to check for HVSR Peaks. This can be a tigher range than hvsr_band, but if larger, it will still only use the hvsr_band range.
- processing_parameters={} : dict or filepath, default={}
- If filepath, should point to a .proc json file with processing parameters (i.e, an output from sprit.export_settings()).
@@ -5293,7 +537,7 @@ Parameters
- If dictionary, dictionary containing nested dictionaries of function names as they key, and the parameter names/values as key/value pairs for each key.
- If a function name is not present, or if a parameter name is not present, default values will be used.
- For example:
-{ 'fetch_data' : {'source':'batch', 'trim_dir':"/path/to/trimmed/data", 'export_format':'mseed', 'detrend':'spline', 'plot_input_stream':True, 'verbose':False, kwargs:{'kwargskey':'kwargsvalue'}} }
+{ 'fetch_data' : {'source':'batch', 'data_export_path':"/path/to/trimmed/data", 'data_export_format':'mseed', 'detrend':'spline', 'plot_input_stream':True, 'verbose':False, kwargs:{'kwargskey':'kwargsvalue'}} }
verbose
: bool
, default=False
- Whether to print output and results to terminal
@@ -5302,277 +546,45 @@ Returns
params
: HVSRData
sprit.HVSRData class containing input parameters, including data file path and metadata path. This will be used as an input to other functions. If batch processing, params will be converted to batch type in fetch_data() step.
-
-
-Expand source code
-
-def input_params(datapath,
- site='HVSR Site',
- network='AM',
- station='RAC84',
- loc='00',
- channels=['EHZ', 'EHN', 'EHE'],
- acq_date=str(datetime.datetime.now().date()),
- starttime = '00:00:00.00',
- endtime = '23:59:59.999999',
- tzone = 'UTC',
- xcoord = -88.2290526,
- ycoord = 40.1012122,
- elevation = 755,
- input_crs='EPSG:4326',#4269 is NAD83, defautling to WGS
- output_crs='EPSG:4326',
- elev_unit = 'feet',
- depth = 0,
- instrument = 'Raspberry Shake',
- metapath = None,
- hvsr_band = [1, 40],
- peak_freq_range=[1, 40],
- processing_parameters={},
- verbose=False
- ):
- """Function for designating input parameters for reading in and processing data
-
- Parameters
- ----------
- datapath : str or pathlib.Path object
- Filepath of data. This can be a directory or file, but will need to match with what is chosen later as the source parameter in fetch_data()
- site : str, default="HVSR Site"
- Site name as designated by user for ease of reference. Used for plotting titles, filenames, etc.
- network : str, default='AM'
- The network designation of the seismometer. This is necessary for data from Raspberry Shakes. 'AM' is for Amateur network, which fits Raspberry Shakes.
- station : str, default='RAC84'
- The station name of the seismometer. This is necessary for data from Raspberry Shakes.
- loc : str, default='00'
- Location information of the seismometer.
- channels : list, default=['EHZ', 'EHN', 'EHE']
- The three channels used in this analysis, as a list of strings. Preferred that Z component is first, but not necessary
- acq_date : str, int, date object, or datetime object
- If string, preferred format is 'YYYY-MM-DD'.
- If int, this will be interpreted as the time_int of year of current year (e.g., 33 would be Feb 2 of current year)
- If date or datetime object, this will be the date. Make sure to account for time change when converting to UTC (if UTC is the following time_int, use the UTC time_int).
- starttime : str, time object, or datetime object, default='00:00:00.00'
- Start time of data stream. This is necessary for Raspberry Shake data in 'raw' form, or for trimming data. Format can be either 'HH:MM:SS.micros' or 'HH:MM' at minimum.
- endtime : str, time obejct, or datetime object, default='23:59:99.99'
- End time of data stream. This is necessary for Raspberry Shake data in 'raw' form, or for trimming data. Same format as starttime.
- tzone : str or int, default = 'UTC'
- Timezone of input data. If string, 'UTC' will use the time as input directly. Any other string value needs to be a TZ identifier in the IANA database, a wikipedia page of these is available here: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones.
- If int, should be the int value of the UTC offset (e.g., for American Eastern Standard Time: -5).
- This is necessary for Raspberry Shake data in 'raw' format.
- xcoord : float, default=-88.2290526
- Longitude (or easting, or, generally, X coordinate) of data point, in Coordinate Reference System (CRS) designated by input_crs. Currently only used in csv output, but will likely be used in future for mapping/profile purposes.
- ycoord : float, default=40.1012122
- Latitute (or northing, or, generally, X coordinate) of data point, in Coordinate Reference System (CRS) designated by input_crs. Currently only used in csv output, but will likely be used in future for mapping/profile purposes.
- input_crs : str or other format read by pyproj, default='EPSG:4326'
- Coordinate reference system of input data, as used by pyproj.CRS.from_user_input()
- output_crs : str or other format read by pyproj, default='EPSG:4326'
- Coordinate reference system to which input data will be transformed, as used by pyproj.CRS.from_user_input()
- elevation : float, default=755
- Surface elevation of data point. Not currently used (except in csv output), but will likely be used in the future.
- depth : float, default=0
- Depth of seismometer. Not currently used, but will likely be used in the future.
- instrument : str or list {'Raspberry Shake')
- Instrument from which the data was acquired.
- metapath : str or pathlib.Path object, default=None
- Filepath of metadata, in format supported by obspy.read_inventory. If default value of None, will read from resources folder of repository (only supported for Raspberry Shake).
- hvsr_band : list, default=[1, 40]
- Two-element list containing low and high "corner" frequencies (in Hz) for processing. This can specified again later.
- peak_freq_range : list or tuple, default=[1, 40]
- Two-element list or tuple containing low and high frequencies (in Hz) that are used to check for HVSR Peaks. This can be a tigher range than hvsr_band, but if larger, it will still only use the hvsr_band range.
- processing_parameters={} : dict or filepath, default={}
- If filepath, should point to a .proc json file with processing parameters (i.e, an output from sprit.export_settings()).
- Note that this only applies to parameters for the functions: 'fetch_data', 'remove_noise', 'generate_ppsds', 'process_hvsr', 'check_peaks', and 'get_report.'
- If dictionary, dictionary containing nested dictionaries of function names as they key, and the parameter names/values as key/value pairs for each key.
- If a function name is not present, or if a parameter name is not present, default values will be used.
- For example:
- `{ 'fetch_data' : {'source':'batch', 'trim_dir':"/path/to/trimmed/data", 'export_format':'mseed', 'detrend':'spline', 'plot_input_stream':True, 'verbose':False, kwargs:{'kwargskey':'kwargsvalue'}} }`
- verbose : bool, default=False
- Whether to print output and results to terminal
-
- Returns
- -------
- params : sprit.HVSRData
- sprit.HVSRData class containing input parameters, including data file path and metadata path. This will be used as an input to other functions. If batch processing, params will be converted to batch type in fetch_data() step.
-
- """
- orig_args = locals().copy() #Get the initial arguments
- start_time = datetime.datetime.now()
-
- #Reformat times
- if type(acq_date) is datetime.datetime:
- date = str(acq_date.date())
- elif type(acq_date) is datetime.date:
- date=str(acq_date)
- elif type(acq_date) is str:
- monthStrs = {'jan':1, 'january':1,
- 'feb':2, 'february':2,
- 'mar':3, 'march':3,
- 'apr':4, 'april':4,
- 'may':5,
- 'jun':6, 'june':6,
- 'jul':7, 'july':7,
- 'aug':8, 'august':8,
- 'sep':9, 'sept':9, 'september':9,
- 'oct':10,'october':10,
- 'nov':11,'november':11,
- 'dec':12,'december':12}
-
- spelledMonth = False
- for m in monthStrs.keys():
- acq_date = acq_date.lower()
- if m in acq_date:
- spelledMonth = True
- break
-
- if spelledMonth is not False:
- month = monthStrs[m]
-
- if '/' in acq_date:
- sep = '/'
- elif '.' in acq_date:
- sep='.'
- elif ' ' in acq_date:
- sep = ' '
- acq_date = acq_date.replace(',', '')
- else:
- sep = '-'
-
- acq_date = acq_date.split(sep)
- if len(acq_date[2]) > 2: #American format
- date = '{}-{}-{}'.format(acq_date[2], acq_date[0], acq_date[1])
- else: #international format, one we're going to use
- date = '{}-{}-{}'.format(acq_date[0], acq_date[1], acq_date[2])
-
- elif type(acq_date) is int:
- year=datetime.datetime.today().year
- date = str((datetime.datetime(year, 1, 1) + datetime.timedelta(acq_date - 1)).date())
-
- if type(starttime) is str:
- if 'T' in starttime:
- #date=starttime.split('T')[0]
- starttime = starttime.split('T')[1]
- else:
- pass
- #starttime = date+'T'+starttime
- elif type(starttime) is datetime.datetime:
- #date = str(starttime.date())
- starttime = str(starttime.time())
- ###HERE IS NEXT
- elif type(starttime) is datetime.time():
- starttime = str(starttime)
-
- starttime = str(date)+"T"+str(starttime)
- starttime = obspy.UTCDateTime(sprit_utils.format_time(starttime, tzone=tzone))
-
- if type(endtime) is str:
- if 'T' in endtime:
- date=endtime.split('T')[0]
- endtime = endtime.split('T')[1]
- elif type(endtime) is datetime.datetime:
- date = str(endtime.date())
- endtime = str(endtime.time())
- elif type(endtime) is datetime.time():
- endtime = str(endtime)
-
- endtime = str(date)+"T"+str(endtime)
- endtime = obspy.UTCDateTime(sprit_utils.format_time(endtime, tzone=tzone))
-
- acq_date = datetime.date(year=int(date.split('-')[0]), month=int(date.split('-')[1]), day=int(date.split('-')[2]))
- raspShakeInstNameList = ['raspberry shake', 'shake', 'raspberry', 'rs', 'rs3d', 'rasp. shake', 'raspshake']
-
- if output_crs is None:
- output_crs='EPSG:4326'
-
- if input_crs is None:
- input_crs = 'EPSG:4326'#Default to WGS84
- else:
- input_crs = CRS.from_user_input(input_crs)
- output_crs = CRS.from_user_input(output_crs)
-
- coord_transformer = Transformer.from_crs(input_crs, output_crs, always_xy=True)
- xcoord, ycoord = coord_transformer.transform(xcoord, ycoord)
-
- #Add key/values to input parameter dictionary
- inputParamDict = {'site':site, 'net':network,'sta':station, 'loc':loc, 'cha':channels, 'instrument':instrument,
- 'acq_date':acq_date,'starttime':starttime,'endtime':endtime, 'timezone':'UTC', #Will be in UTC by this point
- 'longitude':xcoord,'latitude':ycoord,'elevation':elevation,'input_crs':input_crs, 'output_crs':output_crs,
- 'depth':depth, 'datapath': datapath, 'metapath':metapath, 'hvsr_band':hvsr_band, 'peak_freq_range':peak_freq_range,
- 'ProcessingStatus':{'InputParamsStatus':True, 'OverallStatus':True}
- }
-
- #Replace any default parameter settings with those from json file of interest, potentially
- instrument_settings_dict = {}
- if pathlib.Path(instrument).exists():
- instrument_settings = import_settings(settings_import_path=instrument, settings_import_type='instrument', verbose=verbose)
- input_params_args = inspect.getfullargspec(input_params).args
- input_params_args.append('net')
- input_params_args.append('sta')
- for k, settings_value in instrument_settings.items():
- if k in input_params_args:
- instrument_settings_dict[k] = settings_value
- inputParamDict['instrument_settings'] = inputParamDict['instrument']
- inputParamDict.update(instrument_settings_dict)
-
- if instrument.lower() in raspShakeInstNameList:
- if metapath is None or metapath=='':
- metapath = pathlib.Path(pkg_resources.resource_filename(__name__, 'resources/rs3dv5plus_metadata.inv')).as_posix()
- inputParamDict['metapath'] = metapath
- #metapath = pathlib.Path(os.path.realpath(__file__)).parent.joinpath('/resources/rs3dv7_metadata.inv')
-
- for settingName in instrument_settings_dict.keys():
- if settingName in inputParamDict.keys():
- inputParamDict[settingName] = instrument_settings_dict[settingName]
-
- #Declare obspy here instead of at top of file for (for example) colab, where obspy first needs to be installed on environment
- if verbose:
- print('Gathering input parameters (input_params())')
- for key, value in inputParamDict.items():
- print('\t {}={}'.format(key, value))
- print()
-
- if isinstance(processing_parameters, dict):
- inputParamDict['processing_parameters'] = processing_parameters
- else:
- processing_parameters = sprit_utils.checkifpath(processing_parameters)
- inputParamDict['processing_parameters'] = import_settings(processing_parameters, settings_import_type='processing', verbose=verbose)
-
- #Format everything nicely
- params = sprit_utils.make_it_classy(inputParamDict)
- params['ProcessingStatus']['InputParamsStatus'] = True
- params = _check_processing_status(params, start_time=start_time, func_name=inspect.stack()[0][3], verbose=verbose)
- return params
-
def make_it_classy(input_data, verbose=False)
-
-
-Expand source code
-
-def make_it_classy(input_data, verbose=False):
- if isinstance(input_data, (sprit_hvsr.HVSRData, sprit_hvsr.HVSRBatch)):
- for k, v in input_data.items():
- if k=='input_params':
- for kin in input_data['input_params'].keys():
- if kin not in input_data.keys():
- input_data[kin] = input_data['input_params'][kin]
- if k=='params':
- for kin in input_data['params'].keys():
- print(kin)
- if kin not in input_data.keys():
- input_data[kin] = input_data['params'][kin]
- output_class = input_data
- else:
- output_class = sprit_hvsr.HVSRData(input_data)
- if verbose:
- print('Made it classy | {} --> {}'.format(type(input_data), type(output_class)))
- return output_class
-
+
+
+def parse_plot_string(plot_string)
+
+
+
+
+
+def plot_azimuth(hvsr_data, fig=None, ax=None, show_azimuth_peaks=False, interpolate_azimuths=True, show_azimuth_grid=False, show_plot=True, **plot_azimuth_kwargs)
+
+
+Function to plot azimuths when azimuths are calculated
+Parameters
+
+hvsr_data
: HVSRData
or HVSRBatch
+- HVSRData that has gone through at least the sprit.fetch_data() step, and before sprit.generate_ppsds()
+show_azimuth_peaks
: bool
, optional
+- Whether to display the peak value at each azimuth calculated on the chart, by default False
+interpolate_azimuths
: bool
, optional
+- Whether to interpolate the azimuth data to get a smoother plot.
+This is just for visualization, does not change underlying data.
+It takes a lot of time to process the data, but interpolation for vizualization can happen fairly fast. By default True.
+show_azimuth_grid
: bool
, optional
+- Whether to display the grid on the chart, by default False
+
+Returns
+
+matplotlib.Figure, matplotlib.Axis
+- Figure and axis of resulting azimuth plot
+
-def plot_hvsr(hvsr_data, plot_type='HVSR ann p C+ ann p SPEC', use_subplots=True, fig=None, ax=None, return_fig=False, save_dir=None, save_suffix='', show_legend=False, show=True, close_figs=False, clear_fig=True, **kwargs)
+def plot_hvsr(hvsr_data, plot_type='HVSR ann p C+ ann p SPEC ann p', azimuth='HV', use_subplots=True, fig=None, ax=None, return_fig=False, plot_engine='matplotlib', save_dir=None, save_suffix='', show_legend=False, show_plot=True, close_figs=False, clear_fig=True, **kwargs)
Function to plot HVSR data
@@ -5581,14 +593,19 @@ Parameters
hvsr_data
: dict
Dictionary containing output from process_hvsr function
-plot_type
: str
or list
, default = 'HVSR ann p C+ ann p SPEC'
+plot_type
: str
or list
, default = 'HVSR ann p C+ ann p SPEC ann p'
The plot_type of plot(s) to plot. If list, will plot all plots listed
- 'HVSR' - Standard HVSR plot, including standard deviation. Options are included below:
- 'p' shows a vertical dotted line at frequency of the "best" peak
- 'ann' annotates the frequency value of of the "best" peak
- 'all' shows all the peaks identified in check_peaks() (by default, only the max is identified)
- 't' shows the H/V curve for all time windows
--'tp' shows all the peaks from the H/V curves of all the time windows
+- 'tp' shows all the peaks from the H/V curves of all the time windows
+- 'fr' shows the window within which SpRIT will search for peak frequencies, as set by peak_freq_range
+- 'test' shows a visualization of the results of the peak validity test(s). Examples:
+- 'tests' visualizes the results of all the peak tests (not the curve tests)
+- 'test12' shows the results of tests 1 and 2.
+- Append any number 1-6 after 'test' to show a specific test result visualized
- 'COMP' - plot of the PPSD curves for each individual component ("C" also works)
- '+' (as a suffix in 'C+' or 'COMP+') plots C on a plot separate from HVSR (C+ is default, but without + will plot on the same plot as HVSR)
- 'p' shows a vertical dotted line at frequency of the "best" peak
@@ -5597,7 +614,17 @@ Parameters
- 't' shows the H/V curve for all time windows
- 'SPEC' - spectrogram style plot of the H/V curve over time
- 'p' shows a horizontal dotted line at the frequency of the "best" peak
-- 'ann' annotates the frequency value of the "best" peak
+- 'ann' annotates the frequency value of the "best" peak
+- 'all' shows all the peaks identified in check_peaks()
+- 'tp' shows all the peaks of the H/V curve at all time windows
+- 'AZ' - circular plot of calculated azimuthal HV curves, similar in style to SPEC plot.
+- 'p' shows a point at each calculated (not interpolated) azimuth peak
+- 'g' shows grid lines at various angles
+- 'i' interpolates so that there is an interpolated azimuth at each degree interval (1 degree step)
+This is the default, so usually 'i' is not needed.
+- '-i' prohibits interpolation (only shows the calculated azimuths, as determined by azimuth_angle (default = 30))
+azimuth
: str
, default = 'HV'
+What 'azimuth' to plot, default being standard N E components combined
use_subplots
: bool
, default = True
Whether to output the plots as subplots (True) or as separate plots (False)
fig
: matplotlib.Figure
, default = None
@@ -5606,13 +633,15 @@ Parameters
If not None, matplotlib axis on which plot is plotted
return_fig
: bool
Whether to return figure and axis objects
+plot_engine
: str
, default='Matplotlib'
+Which engine to use for plotting. Both "matplotlib" and "plotly" are acceptable. For shorthand, 'mpl', 'm' also work for matplotlib; 'plty' or 'p' also work for plotly. Not case sensitive.
save_dir
: str
or None
Directory in which to save figures
save_suffix
: str
Suffix to add to end of figure filename(s), if save_dir is used
show_legend
: bool
, default=False
Whether to show legend in plot
-show
: bool
+show_plot
: bool
Whether to show plot
close_figs
: bool
, default=False
Whether to close figures before plotting
@@ -5626,185 +655,27 @@ Returns
fig
, ax
: matplotlib figure and axis objects
Returns figure and axis matplotlib.pyplot objects if return_fig=True, otherwise, simply plots the figures
-
-
-Expand source code
-
-def plot_hvsr(hvsr_data, plot_type='HVSR ann p C+ ann p SPEC', use_subplots=True, fig=None, ax=None, return_fig=False, save_dir=None, save_suffix='', show_legend=False, show=True, close_figs=False, clear_fig=True,**kwargs):
- """Function to plot HVSR data
-
- Parameters
- ----------
- hvsr_data : dict
- Dictionary containing output from process_hvsr function
- plot_type : str or list, default = 'HVSR ann p C+ ann p SPEC'
- The plot_type of plot(s) to plot. If list, will plot all plots listed
- - 'HVSR' - Standard HVSR plot, including standard deviation. Options are included below:
- - 'p' shows a vertical dotted line at frequency of the "best" peak
- - 'ann' annotates the frequency value of of the "best" peak
- - 'all' shows all the peaks identified in check_peaks() (by default, only the max is identified)
- - 't' shows the H/V curve for all time windows
- -'tp' shows all the peaks from the H/V curves of all the time windows
- - 'COMP' - plot of the PPSD curves for each individual component ("C" also works)
- - '+' (as a suffix in 'C+' or 'COMP+') plots C on a plot separate from HVSR (C+ is default, but without + will plot on the same plot as HVSR)
- - 'p' shows a vertical dotted line at frequency of the "best" peak
- - 'ann' annotates the frequency value of of the "best" peak
- - 'all' shows all the peaks identified in check_peaks() (by default, only the max is identified)
- - 't' shows the H/V curve for all time windows
- - 'SPEC' - spectrogram style plot of the H/V curve over time
- - 'p' shows a horizontal dotted line at the frequency of the "best" peak
- - 'ann' annotates the frequency value of the "best" peak
- use_subplots : bool, default = True
- Whether to output the plots as subplots (True) or as separate plots (False)
- fig : matplotlib.Figure, default = None
- If not None, matplotlib figure on which plot is plotted
- ax : matplotlib.Axis, default = None
- If not None, matplotlib axis on which plot is plotted
- return_fig : bool
- Whether to return figure and axis objects
- save_dir : str or None
- Directory in which to save figures
- save_suffix : str
- Suffix to add to end of figure filename(s), if save_dir is used
- show_legend : bool, default=False
- Whether to show legend in plot
- show : bool
- Whether to show plot
- close_figs : bool, default=False
- Whether to close figures before plotting
- clear_fig : bool, default=True
- Whether to clear figures before plotting
- **kwargs : keyword arguments
- Keyword arguments for matplotlib.pyplot
-
- Returns
- -------
- fig, ax : matplotlib figure and axis objects
- Returns figure and axis matplotlib.pyplot objects if return_fig=True, otherwise, simply plots the figures
- """
- orig_args = locals().copy() #Get the initial arguments
- if isinstance(hvsr_data, HVSRBatch):
- #If running batch, we'll loop through each site
- for site_name in hvsr_data.keys():
- args = orig_args.copy() #Make a copy so we don't accidentally overwrite
- individual_params = hvsr_data[site_name] #Get what would normally be the "params" variable for each site
- args['hvsr_results'] = individual_params #reset the params parameter we originally read in to an individual site params
- if hvsr_data[site_name]['ProcessingStatus']['OverallStatus']:
- try:
- _hvsr_plot_batch(**args) #Call another function, that lets us run this function again
- except:
- print(f"{site_name} not able to be plotted.")
- else:
- if clear_fig and fig is not None and ax is not None: #Intended use for tkinter
- #Clear everything
- for key in ax:
- ax[key].clear()
- for t in fig.texts:
- del t
- fig.clear()
- if close_figs:
- plt.close('all')
-
- compList = ['c', 'comp', 'component', 'components']
- specgramList = ['spec', 'specgram', 'spectrogram']
- hvsrList = ['hvsr', 'hv', 'h']
-
- hvsrInd = np.nan
- compInd = np.nan
- specInd = np.nan
-
- kList = plot_type.split(' ')
- for i, k in enumerate(kList):
- kList[i] = k.lower()
-
- #Get the plots in the right order, no matter how they were input (and ensure the right options go with the right plot)
- #HVSR index
- if len(set(hvsrList).intersection(kList)):
- for i, hv in enumerate(hvsrList):
- if hv in kList:
- hvsrInd = kList.index(hv)
- break
- #Component index
- #if len(set(compList).intersection(kList)):
- for i, c in enumerate(kList):
- if '+' in c and c[:-1] in compList:
- compInd = kList.index(c)
- break
-
- #Specgram index
- if len(set(specgramList).intersection(kList)):
- for i, sp in enumerate(specgramList):
- if sp in kList:
- specInd = kList.index(sp)
- break
-
- indList = [hvsrInd, compInd, specInd]
- indListCopy = indList.copy()
- plotTypeList = ['hvsr', 'comp', 'spec']
-
- plotTypeOrder = []
- plotIndOrder = []
-
- lastVal = 0
- while lastVal != 99:
- firstInd = np.nanargmin(indListCopy)
- plotTypeOrder.append(plotTypeList[firstInd])
- plotIndOrder.append(indList[firstInd])
- lastVal = indListCopy[firstInd]
- indListCopy[firstInd] = 99 #just a high number
-
- plotTypeOrder.pop()
- plotIndOrder[-1]=len(kList)
-
- for i, p in enumerate(plotTypeOrder):
- pStartInd = plotIndOrder[i]
- pEndInd = plotIndOrder[i+1]
- plotComponents = kList[pStartInd:pEndInd]
-
- if use_subplots and i==0 and fig is None and ax is None:
- mosaicPlots = []
- for pto in plotTypeOrder:
- mosaicPlots.append([pto])
- fig, ax = plt.subplot_mosaic(mosaicPlots, gridspec_kw={'hspace':0.3})
- axis = ax[p]
- elif use_subplots:
- with warnings.catch_warnings():
- warnings.simplefilter("ignore") #Often warns about xlim when it is not an issue
- ax[p].clear()
- axis = ax[p]
- else:
- fig, axis = plt.subplots()
-
- if p == 'hvsr':
- kwargs['p'] = 'hvsr'
- _plot_hvsr(hvsr_data, fig=fig, ax=axis, plot_type=plotComponents, xtype='x_freqs', show_legend=show_legend, axes=ax, **kwargs)
- elif p=='comp':
- plotComponents[0] = plotComponents[0][:-1]
- kwargs['p']=='comp'
- _plot_hvsr(hvsr_data, fig=fig, ax=axis, plot_type=plotComponents, xtype='x_freqs', show_legend=show_legend, axes=ax, **kwargs)
- elif p=='spec':
- plottypeKwargs = {}
- for c in plotComponents:
- plottypeKwargs[c] = True
- kwargs.update(plottypeKwargs)
- _plot_specgram_hvsr(hvsr_data, fig=fig, ax=axis, colorbar=False, **kwargs)
- else:
- warnings.warn('Plot type {p} not recognized', UserWarning)
-
- windowsUsedStr = f"{hvsr_data['hvsr_df']['Use'].sum()}/{hvsr_data['hvsr_df'].shape[0]} windows used"
- fig.text(x=0.98, y=0.02, s=windowsUsedStr, ha='right', va='bottom', fontsize='x-small',
- bbox=dict(facecolor='w', edgecolor=None, linewidth=0, alpha=1, pad=9))
-
- if show:
- fig.canvas.draw()
-
- if return_fig:
- return fig, ax
- return
-
+
+
+def plot_outlier_curves(hvsr_data, plot_engine='plotly', rmse_thresh=0.98, use_percentile=True, use_hv_curve=False, from_roc=False, show_plot=True, verbose=False)
+
+
+
+
+
+def plot_preview(hv_data, stream=None, preview_fig=None, spectrogram_component='Z', show_plot=True, return_fig=False)
+
+
+
+
+
+def plot_results(hv_data, plot_string='HVSR p ann C+ p SPEC ann', results_fig=None, results_graph_widget=None, return_fig=False, show_results_plot=True)
+
+
+
-def process_hvsr(hvsr_data, method=3, smooth=True, freq_smooth='konno ohmachi', f_smooth_width=40, resample=True, outlier_curve_rmse_percentile=False, verbose=False)
+def process_hvsr(hvsr_data, horizontal_method=None, smooth=True, freq_smooth='konno ohmachi', f_smooth_width=40, resample=True, outlier_curve_rmse_percentile=False, azimuth=None, verbose=False)
Process the input data and get HVSR data
@@ -5815,16 +686,26 @@ Parameters
hvsr_data
: HVSRData or HVSRBatch
Data object containing all the parameters input and generated by the user (usually, during sprit.input_params(), sprit.fetch_data(), sprit.generate_ppsds() and/or sprit.remove_noise()).
-method
+horizontal_method
: int or str, default=3
-Method to use for combining the horizontal components
+Method to use for combining the horizontal components. Default is 3) Geometric Mean
0) (not used)
-1) Diffuse field assumption, or 'DFA' (not currently implemented)
-2) 'Arithmetic Mean': H ≡ (HN + HE)/2
-3) 'Geometric Mean': H ≡ √HN · HE, recommended by the SESAME project (2004)
-4) 'Vector Summation': H ≡ √H2 N + H2 E
-5) 'Quadratic Mean': H ≡ √(H2 N + H2 E )/2
-6) 'Maximum Horizontal Value': H ≡ max
+1) 'Diffuse field assumption'
+H = √( (eie_E + eie_N) / eie_Z), eie = equal interval energy
+2) 'Arithmetic Mean'
+H ≡ (HN + HE)/2
+3) 'Geometric Mean'
+H ≡ √(HN · HE), recommended by the SESAME project (2004)
+4) 'Vector Summation'
+H ≡ √(HN^2 + HE^2)
+5) 'Quadratic Mean'
+H ≡ √(HN^2 + HE^2)/2
+6) 'Maximum Horizontal Value'
+H ≡ max
+7) 'Minimum Horizontal Value'
+H ≡ min
+8) 'Single Azimuth'
+H = H2·cos(az) + H1·sin(az)
smooth
: bool, default=True
bool or int may be used.
@@ -5853,6 +734,8 @@ Parameters
If False, outlier curve removal is not carried out here.
If True, defaults to 98 (98th percentile).
Otherwise, float of percentile used as rmse_thresh of remove_outlier_curve().
+azimuth
: float
, default = None
+The azimuth angle to use when method is single azimuth.
verbose
: bool, default=False
Whether to print output to terminal
@@ -5860,457 +743,21 @@ Returns
hvsr_out : dict
Dictionary containing all the information about the data, including input parameters
-
-
-Expand source code
-
-def process_hvsr(hvsr_data, method=3, smooth=True, freq_smooth='konno ohmachi', f_smooth_width=40, resample=True, outlier_curve_rmse_percentile=False, verbose=False):
- """Process the input data and get HVSR data
-
- This is the main function that uses other (private) functions to do
- the bulk of processing of the HVSR data and the data quality checks.
-
- Parameters
- ----------
- hvsr_data : HVSRData or HVSRBatch
- Data object containing all the parameters input and generated by the user (usually, during sprit.input_params(), sprit.fetch_data(), sprit.generate_ppsds() and/or sprit.remove_noise()).
- method : int or str, default=3
- Method to use for combining the horizontal components
- 0) (not used)
- 1) Diffuse field assumption, or 'DFA' (not currently implemented)
- 2) 'Arithmetic Mean': H ≡ (HN + HE)/2
- 3) 'Geometric Mean': H ≡ √HN · HE, recommended by the SESAME project (2004)
- 4) 'Vector Summation': H ≡ √H2 N + H2 E
- 5) 'Quadratic Mean': H ≡ √(H2 N + H2 E )/2
- 6) 'Maximum Horizontal Value': H ≡ max {HN, HE}
- smooth : bool, default=True
- bool or int may be used.
- If True, default to smooth H/V curve to using savgoy filter with window length of 51 (works well with default resample of 1000 pts)
- If int, the length of the window in the savgoy filter.
- freq_smooth : str {'konno ohmachi', 'constant', 'proportional'}
- Which frequency smoothing method to use. By default, uses the 'konno ohmachi' method.
- - The Konno & Ohmachi method uses the obspy.signal.konnoohmachismoothing.konno_ohmachi_smoothing() function: https://docs.obspy.org/packages/autogen/obspy.signal.konnoohmachismoothing.konno_ohmachi_smoothing.html
- - The constant method uses a window of constant length f_smooth_width
- - The proportional method uses a window the percentage length of the frequncy steps/range (f_smooth_width now refers to percentage)
- See here for more information: https://www.geopsy.org/documentation/geopsy/hv-processing.html
- f_smooth_width : int, default = 40
- - For 'konno ohmachi': passed directly to the bandwidth parameter of the konno_ohmachi_smoothing() function, determines the width of the smoothing peak, with lower values resulting in broader peak. Must be > 0.
- - For 'constant': the size of a triangular smoothing window in the number of frequency steps
- - For 'proportional': the size of a triangular smoothing window in percentage of the number of frequency steps (e.g., if 1000 frequency steps/bins and f_smooth_width=40, window would be 400 steps wide)
- resample : bool, default = True
- bool or int.
- If True, default to resample H/V data to include 1000 frequency values for the rest of the analysis
- If int, the number of data points to interpolate/resample/smooth the component psd/HV curve data to.
- outlier_curve_rmse_percentile : bool, float, default = False
- If False, outlier curve removal is not carried out here.
- If True, defaults to 98 (98th percentile).
- Otherwise, float of percentile used as rmse_thresh of remove_outlier_curve().
- verbose : bool, defualt=False
- Whether to print output to terminal
-
- Returns
- -------
- hvsr_out : dict
- Dictionary containing all the information about the data, including input parameters
-
- """
- orig_args = locals().copy() #Get the initial arguments
- start_time = datetime.datetime.now()
-
- # Update with processing parameters specified previously in input_params, if applicable
- if 'processing_parameters' in hvsr_data.keys():
- if 'process_hvsr' in hvsr_data['processing_parameters'].keys():
- for k, v in hvsr_data['processing_parameters']['process_hvsr'].items():
- defaultVDict = dict(zip(inspect.getfullargspec(process_hvsr).args[1:],
- inspect.getfullargspec(process_hvsr).defaults))
- # Manual input to function overrides the imported parameter values
- if (not isinstance(v, (HVSRData, HVSRBatch))) and (k in orig_args.keys()) and (orig_args[k]==defaultVDict[k]):
- orig_args[k] = v
-
- method = orig_args['method']
- smooth = orig_args['smooth']
- freq_smooth = orig_args['freq_smooth']
- f_smooth_width = orig_args['f_smooth_width']
- resample = orig_args['resample']
- outlier_curve_rmse_percentile = orig_args['outlier_curve_rmse_percentile']
- verbose = orig_args['verbose']
-
- if (verbose and isinstance(hvsr_data, HVSRBatch)) or (verbose and not hvsr_data['batch']):
- if isinstance(hvsr_data, HVSRData) and hvsr_data['batch']:
- pass
- else:
- print('\nCalculating Horizontal/Vertical Ratios at all frequencies/time steps (process_hvsr())')
- print('\tUsing the following parameters:')
- for key, value in orig_args.items():
- if key=='hvsr_data':
- pass
- else:
- print('\t {}={}'.format(key, value))
- print()
-
- #First, divide up for batch or not
- #Site is in the keys anytime it's not batch
- if isinstance(hvsr_data, HVSRBatch):
- #If running batch, we'll loop through each site
- hvsr_out = {}
- for site_name in hvsr_data.keys():
- args = orig_args.copy() #Make a copy so we don't accidentally overwrite
- args['hvsr_data'] = hvsr_data[site_name] #Get what would normally be the "hvsr_data" variable for each site
- if hvsr_data[site_name]['ProcessingStatus']['OverallStatus']:
- try:
- hvsr_out[site_name] = _process_hvsr_batch(**args) #Call another function, that lets us run this function again
- except:
- hvsr_out = hvsr_data
- hvsr_out[site_name]['ProcessingStatus']['HVStatus']=False
- hvsr_out[site_name]['ProcessingStatus']['OverallStatus'] = False
- else:
- hvsr_out = hvsr_data
- hvsr_out[site_name]['ProcessingStatus']['HVStatus']=False
- hvsr_out[site_name]['ProcessingStatus']['OverallStatus'] = False
- hvsr_out = HVSRBatch(hvsr_out)
- else:
- ppsds = hvsr_data['ppsds'].copy()#[k]['psd_values']
- ppsds = sprit_utils.check_xvalues(ppsds)
-
- methodList = ['<placeholder_0>', 'Diffuse Field Assumption', 'Arithmetic Mean', 'Geometric Mean', 'Vector Summation', 'Quadratic Mean', 'Maximum Horizontal Value']
- x_freqs = {}
- x_periods = {}
-
- psdValsTAvg = {}
- stDev = {}
- stDevValsP = {}
- stDevValsM = {}
- psdRaw={}
- currTimesUsed={}
- hvsrDF = hvsr_data['hvsr_df']
-
- def move_avg(y, box_pts):
- #box = np.ones(box_pts)/box_pts
- box = np.hanning(box_pts)
- y_smooth = np.convolve(y, box, mode='same') / sum(box)
- return y_smooth
-
- for k in ppsds.keys():
- #input_ppsds = ppsds[k]['psd_values'] #original, not used anymore
- input_ppsds = np.stack(hvsrDF['psd_values_'+k].values)
-
- currPPSDs = hvsrDF['psd_values_'+k][hvsrDF['Use']].values
- used_ppsds = np.stack(currPPSDs)
-
- #if reasmpling has been selected
- if resample is True or isinstance(resample, (int, float)):
- if resample is True:
- resample = 1000 #Default smooth value
-
- #xValMin = min(ppsds[k]['period_bin_centers'])
- #xValMax = max(ppsds[k]['period_bin_centers'])
- xValMin = 1/hvsr_data['hvsr_band'][1]
- xValMax = 1/hvsr_data['hvsr_band'][0]
- #Resample period bin values
- x_periods[k] = np.logspace(np.log10(xValMin), np.log10(xValMax), num=resample)
- if smooth or isinstance(smooth, (int, float)):
- if smooth:
- smooth = 51 #Default smoothing window
- padVal = 25
- elif smooth % 2==0:
- smooth +1 #Otherwise, needs to be odd
- padVal = smooth//2
- if padVal %2==0:
- padVal += 1
-
- #Resample raw ppsd values
- for i, ppsd_t in enumerate(input_ppsds):
- if i==0:
- psdRaw[k] = np.interp(x_periods[k], ppsds[k]['period_bin_centers'], ppsd_t)
- if smooth is not False:
- padRawKPad = np.pad(psdRaw[k], [padVal, padVal], mode='reflect')
- #padRawKPadSmooth = scipy.signal.savgol_filter(padRawKPad, smooth, 3)
- padRawKPadSmooth = move_avg(padRawKPad, smooth)
- psdRaw[k] = padRawKPadSmooth[padVal:-padVal]
-
- else:
- psdRaw[k] = np.vstack((psdRaw[k], np.interp(x_periods[k], ppsds[k]['period_bin_centers'], ppsd_t)))
- if smooth is not False:
- padRawKiPad = np.pad(psdRaw[k][i], [padVal, padVal], mode='reflect')
- #padRawKiPadSmooth = scipy.signal.savgol_filter(padRawKiPad, smooth, 3)
- padRawKiPadSmooth = move_avg(padRawKiPad, smooth)
- psdRaw[k][i] = padRawKiPadSmooth[padVal:-padVal]
-
- else:
- #If no resampling desired
- #x_periods[k] = np.array(ppsds[k]['period_bin_centers'])
- x_periods[k] = np.round([1/p for p in hvsr_data['ppsds'][k]['period_xedges'][:-1]],3)
- x_periods[k][0] = hvsr_data['hvsr_band'][1]
- x_periods[k][-1] = hvsr_data['hvsr_band'][0]
- psdRaw[k] = np.array(input_ppsds)
-
- hvsrDF['psd_values_'+k] = list(psdRaw[k])
-
- #Get average psd value across time for each channel (used to calc main H/V curve)
- psdValsTAvg[k] = np.nanmean(np.array(psdRaw[k]), axis=0)
- x_freqs[k] = np.array([1/p for p in x_periods[k]]) #np.divide(np.ones_like(x_periods[k]), x_periods[k])
- stDev[k] = np.std(psdRaw[k], axis=0)
- stDevValsM[k] = np.array(psdValsTAvg[k] - stDev[k])
- stDevValsP[k] = np.array(psdValsTAvg[k] + stDev[k])
-
- currTimesUsed[k] = np.array(hvsrDF['TimesProcessed_Obspy'][hvsrDF['Use']].values)
- #currTimesUsed[k] = ppsds[k]['current_times_used'] #original one
-
- #Get string of method type
- if type(method) is int:
- methodInt = method
- method = methodList[method]
- hvsr_data['method'] = method
-
- #This gets the main hvsr curve averaged from all time steps
- anyK = list(x_freqs.keys())[0]
- hvsr_curve, _ = __get_hvsr_curve(x=x_freqs[anyK], psd=psdValsTAvg, method=methodInt, hvsr_data=hvsr_data, verbose=verbose)
- origPPSD = hvsr_data['ppsds_obspy'].copy()
-
-
- #Add some other variables to our output dictionary
- hvsr_dataUpdate = {'input_params':hvsr_data,
- 'x_freqs':x_freqs,
- 'hvsr_curve':hvsr_curve,
- 'x_period':x_periods,
- 'psd_raw':psdRaw,
- 'current_times_used': currTimesUsed,
- 'psd_values_tavg':psdValsTAvg,
- 'ppsd_std':stDev,
- 'ppsd_std_vals_m':stDevValsM,
- 'ppsd_std_vals_p':stDevValsP,
- 'method':method,
- 'ppsds':ppsds,
- 'ppsds_obspy':origPPSD,
- 'tsteps_used': hvsr_data['tsteps_used'].copy(),
- 'hvsr_df':hvsr_data['hvsr_df']
- }
-
- hvsr_out = HVSRData(hvsr_dataUpdate)
-
- #This is if manual editing was used (should probably be updated at some point to just use masks)
- if 'xwindows_out' in hvsr_data.keys():
- hvsr_out['xwindows_out'] = hvsr_data['xwindows_out']
- else:
- hvsr_out['xwindows_out'] = []
-
-
- freq_smooth_ko = ['konno ohmachi', 'konno-ohmachi', 'konnoohmachi', 'konnohmachi', 'ko', 'k']
- freq_smooth_constant = ['constant', 'const', 'c']
- freq_smooth_proport = ['proportional', 'proportion', 'prop', 'p']
-
- #Frequency Smoothing
- if not freq_smooth:
- if verbose:
- warnings.warn('No frequency smoothing is being applied. This is not recommended for noisy datasets.')
- elif freq_smooth is True or freq_smooth.lower() in freq_smooth_ko:
- from obspy.signal import konnoohmachismoothing
- for k in hvsr_out['psd_raw']:
- colName = f'psd_values_{k}'
-
- ppsd_data = np.stack(hvsr_out['hvsr_df'][colName])
- ppsd_data = hvsr_out['psd_raw'][k]
-
-
- freqs = hvsr_out['x_freqs'][k]
- padding_length = int(f_smooth_width)
-
- padding_value_R = np.nanmean(ppsd_data[:,-1*padding_length:])
- padding_value_L = np.nanmean(ppsd_data[:,:padding_length])
-
- # Pad the data to prevent boundary anamolies
- padded_ppsd_data = np.pad(ppsd_data, ((0, 0), (padding_length, padding_length)),
- 'constant', constant_values=(padding_value_L, padding_value_R))
-
- # Pad the frequencies
- ratio = freqs[1] / freqs[0]
- # Generate new elements on either side and combine
- left_padding = [freqs[0] / (ratio ** i) for i in range(padding_length, 0, -1)]
- right_padding = [freqs[-1] * (ratio ** i) for i in range(1, padding_length + 1)]
- padded_freqs = np.concatenate([left_padding, freqs, right_padding])
-
- #Filter out UserWarning for just this method, since it throws up a UserWarning that doesn't really matter about dtypes often
- with warnings.catch_warnings():
- warnings.simplefilter('ignore', category=UserWarning)
- smoothed_ppsd_data = konnoohmachismoothing.konno_ohmachi_smoothing(padded_ppsd_data,
- padded_freqs, bandwidth=f_smooth_width, normalize=True)
-
- #Just use the original data
- smoothed_ppsd_data = smoothed_ppsd_data[:,padding_length:-1*padding_length]
- hvsr_out['psd_raw'][k] = smoothed_ppsd_data
- hvsr_out['hvsr_df'][colName] = pd.Series(list(smoothed_ppsd_data), index=hvsr_out['hvsr_df'].index)
-
- elif freq_smooth.lower() in freq_smooth_constant:
- hvsr_out = __freq_smooth_window(hvsr_out, f_smooth_width, kind_freq_smooth='constant')
- elif freq_smooth.lower() in freq_smooth_proport:
- hvsr_out = __freq_smooth_window(hvsr_out, f_smooth_width, kind_freq_smooth='proportional')
- else:
- if verbose:
- warnings.warn(f'You indicated no frequency smoothing should be applied (freq_smooth = {freq_smooth}). This is not recommended for noisy datasets.')
-
- #Get hvsr curve from three components at each time step
- anyK = list(hvsr_out['psd_raw'].keys())[0]
- if method==1 or method =='dfa' or method =='Diffuse Field Assumption':
- pass ###UPDATE HERE NEXT???__get_hvsr_curve(x=hvsr_out['x_freqs'][anyK], psd=tStepDict, method=methodInt, hvsr_data=hvsr_out, verbose=verbose)
- else:
- hvsr_tSteps = []
- for tStep in range(len(hvsr_out['psd_raw'][anyK])):
- tStepDict = {}
- for k in hvsr_out['psd_raw']:
- tStepDict[k] = hvsr_out['psd_raw'][k][tStep]
- hvsr_tstep, _ = __get_hvsr_curve(x=hvsr_out['x_freqs'][anyK], psd=tStepDict, method=methodInt, hvsr_data=hvsr_out, verbose=verbose)
- hvsr_tSteps.append(np.float32(hvsr_tstep)) #Add hvsr curve for each time step to larger list of arrays with hvsr_curves
- hvsr_out['hvsr_df']['HV_Curves'] = hvsr_tSteps
-
- hvsr_out['ind_hvsr_curves'] = np.stack(hvsr_out['hvsr_df']['HV_Curves'][hvsr_out['hvsr_df']['Use']])
- #hvsr_out['ind_hvsr_curves'] = np.array(hvsr_tSteps)
-
- #Initialize array based only on the curves we are currently using
- indHVCurvesArr = np.stack(hvsr_out['hvsr_df']['HV_Curves'][hvsr_out['hvsr_df']['Use']])
- #indHVCurvesArr = hvsr_out['ind_hvsr_curves']
-
- if outlier_curve_rmse_percentile:
- if outlier_curve_rmse_percentile is True:
- outlier_curve_rmse_percentile = 98
- hvsr_out = remove_outlier_curves(hvsr_out, use_percentile=True, rmse_thresh=outlier_curve_rmse_percentile, use_hv_curve=True, verbose=verbose)
-
- hvsr_out['ind_hvsr_stdDev'] = np.nanstd(indHVCurvesArr, axis=0)
-
- #Get peaks for each time step
- tStepPeaks = []
- for tStepHVSR in hvsr_tSteps:
- tStepPeaks.append(__find_peaks(tStepHVSR))
- hvsr_out['ind_hvsr_peak_indices'] = tStepPeaks
- hvsr_out['hvsr_df']['CurvesPeakIndices'] = tStepPeaks
-
- tStepPFList = []
- for tPeaks in tStepPeaks:
- tStepPFs = []
- for pInd in tPeaks:
- tStepPFs.append(np.float32(hvsr_out['x_freqs'][anyK][pInd]))
- tStepPFList.append(tStepPFs)
- hvsr_out['hvsr_df']['CurvesPeakFreqs'] = tStepPFList
-
- #Get peaks of main HV curve
- hvsr_out['hvsr_peak_indices'] = __find_peaks(hvsr_out['hvsr_curve'])
-
- #Get frequency values at HV peaks in main curve
- hvsrPF=[]
- for p in hvsr_out['hvsr_peak_indices']:
- hvsrPF.append(hvsr_out['x_freqs'][anyK][p])
- hvsr_out['hvsr_peak_freqs'] = np.array(hvsrPF)
-
-
- #Get other HVSR parameters (i.e., standard deviations, etc.)
- hvsr_out = __gethvsrparams(hvsr_out)
-
- #Include the original obspy stream in the output
- hvsr_out['input_stream'] = hvsr_dataUpdate['input_params']['input_stream'] #input_stream
- hvsr_out = sprit_utils.make_it_classy(hvsr_out)
- hvsr_out['ProcessingStatus']['HVStatus'] = True
-
- if 'processing_parameters' not in hvsr_out.keys():
- hvsr_out['processing_parameters'] = {}
- hvsr_out['processing_parameters']['generate_ppsds'] = {}
- for key, value in orig_args.items():
- hvsr_out['processing_parameters']['generate_ppsds'][key] = value
-
- hvsr_out = _check_processing_status(hvsr_out, start_time=start_time, func_name=inspect.stack()[0][3], verbose=verbose)
-
- return hvsr_out
-
def read_from_RS(dest, src='SHAKENAME@HOSTNAME:/opt/data/archive/YEAR/AM/STATION/', opts='az', username='myshake', password='shakeme', hostname='rs.local', year='2023', sta='RAC84', sleep_time=0.1, verbose=True, save_progress=True, method='scp')
-
-
-Expand source code
-
-def read_from_RS(dest, src='SHAKENAME@HOSTNAME:/opt/data/archive/YEAR/AM/STATION/', opts='az', username='myshake', password='shakeme',hostname='rs.local', year='2023', sta='RAC84',sleep_time=0.1, verbose=True, save_progress=True, method='scp'):
- src = src.replace('SHAKENAME', username)
- src = src.replace('SHAKENAME', hostname)
- src = src.replace('YEAR', year)
- src = src.replace('STATION', sta)
-
- if method == 'src':
- """This does not work from within a virtual environment!!!!"""
- #import pexpect
- import sys
- #from pexpect import popen_spawn
- import time
- import wexpect
-
- scp_command = 'scp -r {} "{}"'.format(src, dest)
-
- print('Command:', scp_command)
- child = wexpect.spawn(scp_command, timeout=5)
-
- child.expect("password:")
- child.sendline(password)
-
- child.expect(wexpect.EOF)
-
- print("Files have been successfully transferred to {}!".format(dest))
- elif method=='rsync':
- if verbose:
- opts = opts + 'v'
- if save_progress:
- opts = opts + 'p'
-
- #import subprocess
- #subprocess.run(["rsync", "-"+opts, src, dest])
- #subprocess.run(["rsync", "-"+opts, src, dest])
-
- import pty
- #Test, from https://stackoverflow.com/questions/13041732/ssh-password-through-python-subprocess
- command = [
- 'rsync',
- "-"+opts,
- src,
- dest
- #'{0}@{1}'.format(shakename, hostname),
- #'-o', 'NumberOfPasswordPrompts=1',
- #'sleep {0}'.format(sleep_time),
- ]
-
- # PID = 0 for child, and the PID of the child for the parent
- pid, child_fd = pty.fork()
-
- if not pid: # Child process
- # Replace child process with our SSH process
- os.execv(command[0], command)
-
- while True:
- output = os.read(child_fd, 1024).strip()
- lower = output.lower()
- # Write the password
- if lower.endswith('password:'):
- os.write(child_fd, password + '\n')
- break
- elif 'are you sure you want to continue connecting' in lower:
- # Adding key to known_hosts
- os.write(child_fd, 'yes\n')
- elif 'company privacy warning' in lower:
- pass # This is an understood message
- else:
- print("SSH Connection Failed",
- "Encountered unrecognized message when spawning "
- "the SSH tunnel: '{0}'".format(output))
-
- return dest
-
-def read_tromino_files(datapath, params, sampling_rate=128, start_byte=24576, verbose=False, **kwargs)
+def read_tromino_files(input_data, params, struct_format='H', sampling_rate=128, start_byte=24576, verbose=False, **kwargs)
Function to read data from tromino. Specifically, this has been lightly tested on Tromino 3G+ machines
Parameters
-datapath
: str, pathlib.Path()
+input_data
: str, pathlib.Path()
- The input parameter datapath from sprit.input_params()
params
: HVSRData
or HVSRBatch
- The parameters as read in from input_params() and and fetch_data()
@@ -6322,100 +769,9 @@ Returns
obspy.Stream
- An obspy.Stream object containing the trace data from the Tromino instrument
-
-
-Expand source code
-
-def read_tromino_files(datapath, params, sampling_rate=128, start_byte=24576, verbose=False, **kwargs):
- """Function to read data from tromino. Specifically, this has been lightly tested on Tromino 3G+ machines
-
- Parameters
- ----------
- datapath : str, pathlib.Path()
- The input parameter _datapath_ from sprit.input_params()
- params : HVSRData or HVSRBatch
- The parameters as read in from input_params() and and fetch_data()
- verbose : bool, optional
- Whether to print results to terminal, by default False
-
- Returns
- -------
- obspy.Stream
- An obspy.Stream object containing the trace data from the Tromino instrument
- """
- dPath = datapath
-
- strucSizes = {'c':1, 'b':1,'B':1, '?':1,
- 'h':2,'H':2,'e':2,
- 'i':4,'I':4,'l':4,'L':4,'f':4,
- 'q':8,'Q':8,'d':8,
- 'n':8,'N':8,'s':16,'p':16,'P':16,'x':16}
-
- #H (pretty sure it's Q) I L or Q all seem to work (probably not Q?)
- structFormat = 'H'
- structSize = strucSizes[structFormat]
-
- dataList = []
- with open(dPath, 'rb') as f:
- while True:
- data = f.read(structSize) # Read 4 bytes
- if not data: # End of file
- break
- value = struct.unpack(structFormat, data)[0] # Interpret as a float
- dataList.append(value)
-
- import numpy as np
- dataArr = np.array(dataList)
- import matplotlib.pyplot as plt
-
- medVal = np.nanmedian(dataArr[50000:100000])
-
- if 'start_byte' in kwargs.keys():
- start_byte = kwargs['start_byte']
-
- startByte = start_byte
- comp1 = dataArr[startByte::3] - medVal
- comp2 = dataArr[startByte+1::3] - medVal
- comp3 = dataArr[startByte+2::3] - medVal
- headerBytes = dataArr[:startByte]
-
- #fig, ax = plt.subplots(3, sharex=True, sharey=True)
- #ax[0].plot(comp1, linewidth=0.1, c='k')
- #ax[1].plot(comp2, linewidth=0.1, c='k')
- #ax[2].plot(comp3, linewidth=0.1, c='k')
-
- if 'sampling_rate' in kwargs.keys():
- sampling_rate = kwargs['sampling_rate']
-
- sTime = obspy.UTCDateTime(params['acq_date'].year, params['acq_date'].month, params['acq_date'].day,
- params['starttime'].hour, params['starttime'].minute,
- params['starttime'].second,params['starttime'].microsecond)
- eTime = sTime + (((len(comp1))/sampling_rate)/60)*60
-
- traceHeader1 = {'sampling_rate':sampling_rate,
- 'calib' : 1,
- 'npts':len(comp1),
- 'network':'AM',
- 'location':'00',
- 'station' : 'TRMNO',
- 'channel':'BHE',
- 'starttime':sTime}
-
- traceHeader2=traceHeader1.copy()
- traceHeader3=traceHeader1.copy()
- traceHeader2['channel'] = 'BHN'
- traceHeader3['channel'] = 'BHZ'
-
- trace1 = obspy.Trace(data=comp1, header=traceHeader1)
- trace2 = obspy.Trace(data=comp2, header=traceHeader2)
- trace3 = obspy.Trace(data=comp3, header=traceHeader3)
-
- st = obspy.Stream([trace1, trace2, trace3])
- return st
-
-def remove_noise(hvsr_data, remove_method='auto', sat_percent=0.995, noise_percent=0.8, sta=2, lta=30, stalta_thresh=[0.5, 5], warmup_time=0, cooldown_time=0, min_win_size=1, remove_raw_noise=False, verbose=False)
+def remove_noise(hvsr_data, remove_method=None, processing_window=None, sat_percent=0.995, noise_percent=0.8, sta=2, lta=30, stalta_thresh=[8, 16], warmup_time=0, cooldown_time=0, min_win_size=1, remove_raw_noise=False, show_stalta_plot=False, verbose=False)
Function to remove noisy windows from data, using various methods.
@@ -6432,6 +788,10 @@ Parameters
remove_method
: str, {'auto', 'manual', 'stalta'/'antitrigger', 'saturation threshold', 'noise threshold', 'warmup'/'cooldown'/'buffer'/'warm_cool'}
The different methods for removing noise from the dataset. A list of strings will also work, in which case, it should be a list of the above strings. See descriptions above for what how each method works. By default 'auto.'
If remove_method='auto', this is the equivalent of remove_method=['noise threshold', 'antitrigger', 'saturation threshold', 'warm_cool']
+processing_window
: list, tuple,
or None
+A list/tuple of two items [s, e] or a list/tuple of two-item lists/tuples [[s0, e0], [s1,e1],…[sn, en]] with start and end time(s) for windows to keep for processing.
+Data outside of these times will be excluded from processing.
+Times should be obspy.UTCDateTime objects to ensure precision, but time strings ("13:05") will also work in most cases (exceptions may be when the data stream starts/ends on different UTC days)
sat_percent
: float
, default=0.995
Percentage (between 0 and 1), to use as the threshold at which to remove data. This is used in the saturation method. By default 0.995.
If a value is passed that is greater than 1, it will be divided by 100 to obtain the percentage.
@@ -6445,266 +805,24 @@ Parameters
stalta_thresh
: list
, default=[0.5,5]
Two-item list or tuple with the thresholds for the stalta antitrigger. The first value (index [0]) is the lower threshold, the second value (index [1] is the upper threshold), by default [0.5,5]
warmup_time
: int
, default=0
-Time in seconds to allow for warmup of the instrument (or while operator is still near instrument). This will renove any data before this time, by default 0.
-cooldown_time
: int
, default=0
-Time in seconds to allow for cooldown of the instrument (or for when operator is nearing instrument). This will renove any data before this time, by default 0.
-min_win_size
: float
, default=1
-The minumum size a window must be over specified threshold (in seconds) for it to be removed
-remove_raw_noise
: bool
, default=False
-If remove_raw_noise=True, will perform operation on raw data ('input_stream'), rather than potentially already-modified data ('stream').
-verbose
: bool
, default=False
-Whether to print status of remove_noise
-
-Returns
-
-output
: dict
-- Dictionary similar to hvsr_data, but containing modified data with 'noise' removed
-
-
-
-Expand source code
-
-def remove_noise(hvsr_data, remove_method='auto', sat_percent=0.995, noise_percent=0.80, sta=2, lta=30, stalta_thresh=[0.5,5], warmup_time=0, cooldown_time=0, min_win_size=1, remove_raw_noise=False, verbose=False):
- """Function to remove noisy windows from data, using various methods.
-
- Methods include
- - Manual window selection (by clicking on a chart with spectrogram and stream data),
- - Auto window selection, which does the following two in sequence (these can also be done indepently):
- - A sta/lta "antitrigger" method (using stalta values to automatically remove triggered windows where there appears to be too much noise)
- - A noise threshold method, that cuts off all times where the noise threshold equals more than (by default) 80% of the highest amplitude noise sample for the length specified by lta (in seconds)
- - A saturation threshold method, that cuts off all times where the noise threshold equals more than (by default) 99.5% of the highest amplitude noise sample.
-
- Parameters
- ----------
- hvsr_data : dict, obspy.Stream, or obspy.Trace
- Dictionary containing all the data and parameters for the HVSR analysis
- remove_method : str, {'auto', 'manual', 'stalta'/'antitrigger', 'saturation threshold', 'noise threshold', 'warmup'/'cooldown'/'buffer'/'warm_cool'}
- The different methods for removing noise from the dataset. A list of strings will also work, in which case, it should be a list of the above strings. See descriptions above for what how each method works. By default 'auto.'
- If remove_method='auto', this is the equivalent of remove_method=['noise threshold', 'antitrigger', 'saturation threshold', 'warm_cool']
- sat_percent : float, default=0.995
- Percentage (between 0 and 1), to use as the threshold at which to remove data. This is used in the saturation method. By default 0.995.
- If a value is passed that is greater than 1, it will be divided by 100 to obtain the percentage.
- noise_percent : float, default = 0.8
- Percentage (between 0 and 1), to use as the threshold at which to remove data, if it persists for longer than time (in seconds (specified by min_win_size)). This is used in the noise threshold method. By default 0.8.
- If a value is passed that is greater than 1, it will be divided by 100 to obtain the percentage.
- sta : int, optional
- Short term average (STA) window (in seconds), by default 2. For use with sta/lta antitrigger method.
- lta : int, optional
- Long term average (STA) window (in seconds), by default 30. For use with sta/lta antitrigger method.
- stalta_thresh : list, default=[0.5,5]
- Two-item list or tuple with the thresholds for the stalta antitrigger. The first value (index [0]) is the lower threshold, the second value (index [1] is the upper threshold), by default [0.5,5]
- warmup_time : int, default=0
- Time in seconds to allow for warmup of the instrument (or while operator is still near instrument). This will renove any data before this time, by default 0.
- cooldown_time : int, default=0
- Time in seconds to allow for cooldown of the instrument (or for when operator is nearing instrument). This will renove any data before this time, by default 0.
- min_win_size : float, default=1
- The minumum size a window must be over specified threshold (in seconds) for it to be removed
- remove_raw_noise : bool, default=False
- If remove_raw_noise=True, will perform operation on raw data ('input_stream'), rather than potentially already-modified data ('stream').
- verbose : bool, default=False
- Whether to print status of remove_noise
-
- Returns
- -------
- output : dict
- Dictionary similar to hvsr_data, but containing modified data with 'noise' removed
- """
- #Get intput paramaters
- orig_args = locals().copy()
- start_time = datetime.datetime.now()
-
- # Update with processing parameters specified previously in input_params, if applicable
- if 'processing_parameters' in hvsr_data.keys():
- if 'remove_noise' in hvsr_data['processing_parameters'].keys():
- for k, v in hvsr_data['processing_parameters']['remove_noise'].items():
- defaultVDict = dict(zip(inspect.getfullargspec(remove_noise).args[1:],
- inspect.getfullargspec(remove_noise).defaults))
- # Manual input to function overrides the imported parameter values
- if (not isinstance(v, (HVSRData, HVSRBatch))) and (k in orig_args.keys()) and (orig_args[k]==defaultVDict[k]):
- orig_args[k] = v
-
- remove_method = orig_args['remove_method']
- sat_percent = orig_args['sat_percent']
- noise_percent = orig_args['noise_percent']
- sta = orig_args['sta']
- lta = orig_args['lta']
- stalta_thresh = orig_args['stalta_thresh']
- warmup_time = orig_args['warmup_time']
- cooldown_time = orig_args['cooldown_time']
- min_win_size = orig_args['min_win_size']
- remove_raw_noise = orig_args['remove_raw_noise']
- verbose = orig_args['verbose']
-
- if (verbose and isinstance(hvsr_data, HVSRBatch)) or (verbose and not hvsr_data['batch']):
- if isinstance(hvsr_data, HVSRData) and hvsr_data['batch']:
- pass
- else:
- print('\nRemoving noisy data windows (remove_noise())')
- print('\tUsing the following parameters:')
- for key, value in orig_args.items():
- if key=='hvsr_data':
- pass
- else:
- print('\t {}={}'.format(key, value))
-
- #Setup lists
- manualList = ['manual', 'man', 'm', 'window', 'windows', 'w']
- autoList = ['auto', 'automatic', 'all', 'a']
- antitrigger = ['stalta', 'anti', 'antitrigger', 'trigger', 'at']
- saturationThresh = ['saturation threshold', 'saturation', 'sat', 's']
- noiseThresh = ['noise threshold', 'noise', 'threshold', 'n']
- warmup_cooldown=['warmup', 'cooldown', 'warm', 'cool', 'buffer', 'warmup-cooldown', 'warmup_cooldown', 'wc', 'warm_cool', 'warm-cool']
-
- #Get Stream from hvsr_data
- if isinstance(hvsr_data, HVSRBatch):
- #If running batch, we'll loop through each site
- hvsr_out = {}
- for site_name in hvsr_data.keys():
- args = orig_args.copy() #Make a copy so we don't accidentally overwrite
- args['hvsr_data'] = hvsr_data[site_name] #Get what would normally be the "hvsr_data" variable for each site
- if hvsr_data[site_name]['ProcessingStatus']['OverallStatus']:
- try:
- hvsr_out[site_name] = __remove_noise_batch(**args) #Call another function, that lets us run this function again
- except Exception as e:
- hvsr_out[site_name]['ProcessingStatus']['RemoveNoiseStatus']=False
- hvsr_out[site_name]['ProcessingStatus']['OverallStatus']=False
- if verbose:
- print(e)
- else:
- hvsr_data[site_name]['ProcessingStatus']['RemoveNoiseStatus']=False
- hvsr_data[site_name]['ProcessingStatus']['OverallStatus']=False
- hvsr_out = hvsr_data
-
- output = HVSRBatch(hvsr_out)
- return output
- elif isinstance(hvsr_data, (HVSRData, dict, obspy.Stream, obspy.Trace)):
- if isinstance(hvsr_data, (HVSRData, dict)):
- if remove_raw_noise:
- inStream = hvsr_data['input_stream'].copy()
- else:
- inStream = hvsr_data['stream'].copy()
- output = hvsr_data#.copy()
- else:
- inStream = hvsr_data.copy()
- output = inStream.copy()
-
- outStream = inStream
-
- if isinstance(remove_method, str):
- if ',' in remove_method:
- remove_method = remove_method.split(',')
- else:
- remove_method = [remove_method]
- elif isinstance(remove_method, (list, tuple)):
- pass
- elif not remove_method:
- remove_method=[None]
- else:
- warnings.warn(f"Input value remove_method={remove_method} must be either string, list of strings, None, or False. No noise removal will be carried out. Please choose one of the following: 'manual', 'auto', 'antitrigger', 'noise threshold', 'warmup_cooldown'.")
- return output
-
- #Reorder list so manual is always first
- if len(set(remove_method).intersection(manualList)) > 0:
- manInd = list(set(remove_method).intersection(manualList))[0]
- remove_method.remove(manInd)
- remove_method.insert(0, manInd)
-
- #Go through each type of removal and remove
- for rem_kind in remove_method:
- if not rem_kind:
- break
- elif rem_kind.lower() in manualList:
- if isinstance(output, (HVSRData, dict)):
- if 'xwindows_out' in output.keys():
- pass
- else:
- output = _select_windows(output)
- window_list = output['xwindows_out']
- if isinstance(outStream, obspy.core.stream.Stream):
- if window_list is not None:
- output['stream'] = __remove_windows(inStream, window_list, warmup_time)
- else:
- output = _select_windows(output)
- elif isinstance(output, (HVSRData, dict)):
- pass
- else:
- RuntimeError("Only obspy.core.stream.Stream data type is currently supported for manual noise removal method.")
- elif rem_kind.lower() in autoList:
- outStream = __remove_noise_thresh(outStream, noise_percent=noise_percent, lta=lta, min_win_size=min_win_size)
- outStream = __remove_anti_stalta(outStream, sta=sta, lta=lta, thresh=stalta_thresh)
- outStream = __remove_noise_saturate(outStream, sat_percent=sat_percent, min_win_size=min_win_size)
- outStream = __remove_warmup_cooldown(stream=outStream, warmup_time=warmup_time, cooldown_time=cooldown_time)
- elif rem_kind.lower() in antitrigger:
- outStream = __remove_anti_stalta(outStream, sta=sta, lta=lta, thresh=stalta_thresh)
- elif rem_kind.lower() in saturationThresh:
- outStream = __remove_noise_saturate(outStream, sat_percent=sat_percent, min_win_size=min_win_size)
- elif rem_kind.lower() in noiseThresh:
- outStream = __remove_noise_thresh(outStream, noise_percent=noise_percent, lta=lta, min_win_size=min_win_size)
- elif rem_kind.lower() in warmup_cooldown:
- outStream = __remove_warmup_cooldown(stream=outStream, warmup_time=warmup_time, cooldown_time=cooldown_time)
- else:
- if len(remove_method)==1:
- warnings.warn(f"Input value remove_method={remove_method} is not recognized. No noise removal will be carried out. Please choose one of the following: 'manual', 'auto', 'antitrigger', 'noise threshold', 'warmup_cooldown'.")
- break
- warnings.warn(f"Input value remove_method={remove_method} is not recognized. Continuing with other noise removal methods.")
-
- #Add output
- if isinstance(output, (HVSRData, dict)):
- if isinstance(outStream, (obspy.Stream, obspy.Trace)):
- output['stream'] = outStream
- else:
- output['stream'] = outStream['stream']
- output['input_stream'] = hvsr_data['input_stream']
-
- if 'processing_parameters' not in output.keys():
- output['processing_parameters'] = {}
- output['processing_parameters']['remove_noise'] = {}
- for key, value in orig_args.items():
- output['processing_parameters']['remove_noise'][key] = value
-
- output['ProcessingStatus']['RemoveNoiseStatus'] = True
- output = _check_processing_status(output, start_time=start_time, func_name=inspect.stack()[0][3], verbose=verbose)
-
- if 'hvsr_df' in output.keys() or ('params' in output.keys() and 'hvsr_df' in output['params'].keys())or ('input_params' in output.keys() and 'hvsr_df' in output['input_params'].keys()):
- hvsrDF = output['hvsr_df']
-
- outStream = output['stream'].split()
- for i, trace in enumerate(outStream):
- if i ==0:
- trEndTime = trace.stats.endtime
- comp_end = trace.stats.component
- continue
- trStartTime = trace.stats.starttime
- comp_start = trace.stats.component
-
- if trEndTime < trStartTime and comp_end==comp_start:
- gap = [trEndTime,trStartTime]
-
- output['hvsr_df']['Use'] = (hvsrDF['TimesProcessed_Obspy'].gt(gap[0]) & hvsrDF['TimesProcessed_Obspy'].gt(gap[1]) )| \
- (hvsrDF['TimesProcessed_ObspyEnd'].lt(gap[0]) & hvsrDF['TimesProcessed_ObspyEnd'].lt(gap[1]))# | \
- output['hvsr_df']['Use'] = output['hvsr_df']['Use'].astype(bool)
-
- trEndTime = trace.stats.endtime
-
- outStream.merge()
- output['stream'] = outStream
-
- elif isinstance(hvsr_data, obspy.Stream) or isinstance(hvsr_data, obspy.Trace):
- output = outStream
- else:
- warnings.warn(f"Output of type {type(output)} for this function will likely result in errors in other processing steps. Returning hvsr_data data.")
- return hvsr_data
- output = sprit_utils.make_it_classy(output)
- if 'xwindows_out' not in output.keys():
- output['xwindows_out'] = []
- else:
- RuntimeError(f"Input of type type(hvsr_data)={type(hvsr_data)} cannot be used.")
-
- return output
-
+Time in seconds to allow for warmup of the instrument (or while operator is still near instrument). This will remove any data before this time, by default 0.
+ cooldown_time
: int
, default=0
+Time in seconds to allow for cooldown of the instrument (or for when operator is nearing instrument). This will remove any data before this time, by default 0.
+min_win_size
: float
, default=1
+The minimum size a window must be over specified threshold (in seconds) for it to be removed
+remove_raw_noise
: bool
, default=False
+If remove_raw_noise=True, will perform operation on raw data ('input_stream'), rather than potentially already-modified data ('stream').
+verbose
: bool
, default=False
+Whether to print status of remove_noise
+
+Returns
+
+output
: dict
+- Dictionary similar to hvsr_data, but containing modified data with 'noise' removed
+
-def remove_outlier_curves(hvsr_data, rmse_thresh=98, use_percentile=True, use_hv_curve=False, show_outlier_plot=False, verbose=False)
+def remove_outlier_curves(hvsr_data, rmse_thresh=98, use_percentile=True, use_hv_curve=False, plot_engine='matplotlib', show_plot=False, verbose=False)
Function used to remove outliers curves using Root Mean Square Error to calculate the error of each windowed
@@ -6734,241 +852,285 @@
Returns
hvsr_data
: dict
Input dictionary with values modified based on work of function.
-
-
-Expand source code
-
-def remove_outlier_curves(hvsr_data, rmse_thresh=98, use_percentile=True, use_hv_curve=False, show_outlier_plot=False, verbose=False):
- """Function used to remove outliers curves using Root Mean Square Error to calculate the error of each windowed
- Probabilistic Power Spectral Density (PPSD) curve against the median PPSD value at each frequency step for all times.
- It calculates the RMSE for the PPSD curves of each component individually. All curves are removed from analysis.
-
- Some abberant curves often occur due to the remove_noise() function, so this should be run some time after remove_noise().
- In general, the recommended workflow is to run this immediately following the generate_ppsds() function.
-
- Parameters
- ----------
- hvsr_data : dict
- Input dictionary containing all the values and parameters of interest
- rmse_thresh : float or int, default=98
- The Root Mean Square Error value to use as a threshold for determining whether a curve is an outlier.
- This averages over each individual entire curve so that curves with very abberant data (often occurs when using the remove_noise() method), can be identified.
- Otherwise, specify a float or integer to use as the cutoff RMSE value (all curves with RMSE above will be removed)
- use_percentile : float, default=True
- Whether rmse_thresh should be interepreted as a raw RMSE value or as a percentile of the RMSE values.
- use_hv_curve : bool, default=False
- Whether to use the calculated HV Curve or the individual components. This can only be True after process_hvsr() has been run.
- show_plot : bool, default=False
- Whether to show a plot of the removed data
- verbose : bool, default=False
- Whether to print output of function to terminal
-
- Returns
- -------
- hvsr_data : dict
- Input dictionary with values modified based on work of function.
- """
- # Setup function
- #Get intput paramaters
- orig_args = locals().copy()
- start_time = datetime.datetime.now()
-
- # Update with processing parameters specified previously in input_params, if applicable
- if 'processing_parameters' in hvsr_data.keys():
- if 'remove_outlier_curves' in hvsr_data['processing_parameters'].keys():
- for k, v in hvsr_data['processing_parameters']['remove_noise'].items():
- defaultVDict = dict(zip(inspect.getfullargspec(remove_outlier_curves).args[1:],
- inspect.getfullargspec(remove_outlier_curves).defaults))
- # Manual input to function overrides the imported parameter values
- if (not isinstance(v, (HVSRData, HVSRBatch))) and (k in orig_args.keys()) and (orig_args[k]==defaultVDict[k]):
- orig_args[k] = v
-
- # Reset parameters in case of manual override of imported parameters
- use_percentile = orig_args['use_percentile']
- rmse_thresh = orig_args['rmse_thresh']
- use_hv_curve = orig_args['use_hv_curve']
- show_outlier_plot = orig_args['show_outlier_plot']
- verbose = orig_args['verbose']
-
- #Print if verbose, which changes depending on if batch data or not
- if (verbose and isinstance(hvsr_data, HVSRBatch)) or (verbose and not hvsr_data['batch']):
- if isinstance(hvsr_data, HVSRData) and hvsr_data['batch']:
- pass
- else:
- print('\nRemoving outlier curves from further analysis (remove_outlier_curves())')
- print('\tUsing the following parameters:')
- for key, value in orig_args.items():
- if key=='hvsr_data':
- pass
- else:
- print('\t {}={}'.format(key, value))
- print()
-
- #First, divide up for batch or not
- #Site is in the keys anytime it's not batch
- if isinstance(hvsr_data, HVSRBatch):
- #If running batch, we'll loop through each site
- hvsr_out = {}
- for site_name in hvsr_data.keys():
- args = orig_args.copy() #Make a copy so we don't accidentally overwrite
- args['hvsr_data'] = hvsr_data[site_name] #Get what would normally be the "hvsr_data" variable for each site
- if hvsr_data[site_name]['ProcessingStatus']['OverallStatus']:
- try:
- hvsr_out[site_name] = __remove_outlier_curves(**args) #Call another function, that lets us run this function again
- except:
- hvsr_out = hvsr_data
- hvsr_out[site_name]['ProcessingStatus']['RemoveOutlierCurves'] = False
- hvsr_out[site_name]['ProcessingStatus']['OverallStatus'] = False
- else:
- hvsr_out = hvsr_data
- hvsr_out[site_name]['ProcessingStatus']['RemoveOutlierCurves'] = False
- hvsr_out[site_name]['ProcessingStatus']['OverallStatus'] = False
- hvsr_out = HVSRBatch(hvsr_out)
- else:
- #Create plot if designated
- if not use_hv_curve:
- compNames = ['Z', 'E', 'N']
- colNames = compNames
- else:
- compNames=['HV Curve']
- colNames = ['HV_Curves']
- if show_outlier_plot:
- if use_hv_curve:
- spMosaic = ['HV Curve']
- else:
- spMosaic = [['Z'],
- ['E'],
- ['N']]
- fig, ax=plt.subplot_mosaic(spMosaic, sharex=True)
-
- #Loop through each component, and determine which curves are outliers
- bad_rmse=[]
- for i, column in enumerate(colNames):
- if column in compNames:
- column = 'psd_values_'+column
- # Retrieve data from dataframe (use all windows, just in case)
- curr_data = np.stack(hvsr_data['hvsr_df'][column])
-
- # Calculate a median curve, and reshape so same size as original
- medCurve = np.nanmedian(curr_data, axis=0)
- medCurveArr = np.tile(medCurve, (curr_data.shape[0], 1))
-
- # Calculate RMSE
- rmse = np.sqrt(((np.subtract(curr_data, medCurveArr)**2).sum(axis=1))/curr_data.shape[1])
- hvsr_data['hvsr_df']['RMSE_'+column] = rmse
- if use_percentile is True:
- rmse_threshold = np.percentile(rmse, rmse_thresh)
- if verbose:
- print(f'\tRMSE at {rmse_thresh}th percentile for {column} calculated at: {rmse_threshold:.2f}')
- else:
- rmse_threshold = rmse_thresh
-
- # Retrieve index of those RMSE values that lie outside the threshold
- for j, curve in enumerate(curr_data):
- if rmse[j] > rmse_threshold:
- bad_rmse.append(j)
-
- # Show plot of removed/retained data
- if show_outlier_plot:
- # Intialize to only get unique labels
- rem_label_got = False
- keep_label_got = False
-
- # Iterate through each curve to determine if it's rmse is outside threshold, for plot
- for j, curve in enumerate(curr_data):
- label=None
- if rmse[j] > rmse_threshold:
- linestyle = 'dashed'
- linecolor='darkred'
- alpha = 1
- linewidth = 1
- if not rem_label_got:
- label='Removed Curve'
- rem_label_got=True
- else:
- linestyle='solid'
- linecolor = 'rosybrown'
- alpha = 0.25
- linewidth=0.5
- if not keep_label_got:
- keep_label_got=True
- label='Retained Curve'
-
- # Plot each individual curve
- ax[compNames[i]].plot(1/hvsr_data.ppsds[compNames[i]]['period_bin_centers'], curve, linewidth=linewidth, c=linecolor, linestyle=linestyle, alpha=alpha, label=label)
-
- # Plot the median curve
- ax[compNames[i]].plot(1/hvsr_data.ppsds[compNames[i]]['period_bin_centers'],medCurve, linewidth=1, color='k', label='Median Curve')
-
- # Format axis
- ax[compNames[i]].set_ylabel(f"{compNames[i]}")
- ax[compNames[i]].legend(fontsize=10, labelspacing=0.1)
- ax[compNames[i]].semilogx()
- if show_outlier_plot:
- plt.show()
-
- # Get unique values of bad_rmse indices and set the "Use" column of the hvsr_df to False for that window
- bad_rmse = np.unique(bad_rmse)
- if len(bad_rmse) > 0:
-
- hvsr_data['hvsr_df']['Use'] = hvsr_data['hvsr_df']['Use'] * (rmse_threshold > hvsr_data['hvsr_df']['RMSE_'+column])
- #hvsr_data['hvsr_df'].loc[bad_index, "Use"] = False
-
- if verbose:
- if len(bad_rmse)>0:
- print(f"\tThe windows starting at the following times have been removed from further analysis ({len(bad_rmse)}/{hvsr_data['hvsr_df'].shape[0]}):")
- for b in hvsr_data['hvsr_df'].index[pd.Series(bad_rmse)]:
- print(f"\t\t{b}")
- else:
- print('\tNo outlier curves have been removed')
-
- hvsr_out = hvsr_data
-
- if 'processing_parameters' not in hvsr_out.keys():
- hvsr_out['processing_parameters'] = {}
- hvsr_out['processing_parameters']['remove_outlier_curves'] = {}
- for key, value in orig_args.items():
- hvsr_out['processing_parameters']['remove_outlier_curves'][key] = value
-
- hvsr_data['ProcessingStatus']['RemoveOutlierCurvesStatus'] = True
-
- hvsr_out = _check_processing_status(hvsr_out, start_time=start_time, func_name=inspect.stack()[0][3], verbose=verbose)
-
- return hvsr_out
-
-def run(datapath, source='file', verbose=False, **kwargs)
+def run(input_data, source='file', azimuth_calculation=False, noise_removal=False, outlier_curves_removal=False, verbose=False, **kwargs)
The sprit.run() is the main function that allows you to do all your HVSR processing in one simple step (sprit.run() is how you would call it in your code, but it may also be called using sprit.sprit_hvsr.run())
-The datapath parameter of sprit.run() is the only required parameter. This can be either a single file, a list of files (one for each component, for example), a directory (in which case, all obspy-readable files will be added to an HVSRBatch instance), a Rasp. Shake raw data directory, or sample data.
-The sprit.run() function calls the following functions. This is the recommended order/set of functions to run to process HVSR using SpRIT. See the API documentation for these functions for more information:
-- input_params(): The datapath parameter of input_params() is the only required variable, though others may also need to be called for your data to process correctly.
-- fetch_data(): the source parameter of fetch_data() is the only explicit variable in the sprit.run() function aside from datapath and verbose. Everything else gets delivered to the correct function via the kwargs dictionary
+The input_data parameter of sprit.run() is the only required parameter. This can be either a single file, a list of files (one for each component, for example), a directory (in which case, all obspy-readable files will be added to an HVSRBatch instance), a Rasp. Shake raw data directory, or sample data.
+Notes
+The sprit.run() function calls the following functions. This is the recommended order/set of functions to run to process HVSR using SpRIT. See the API documentation for these functions for more information:
+- input_params(): The input_data parameter of input_params() is the only required variable, though others may also need to be called for your data to process correctly.
+- fetch_data(): the source parameter of fetch_data() is the only explicit variable in the sprit.run() function aside from input_data and verbose. Everything else gets delivered to the correct function via the kwargs dictionary
- remove_noise(): by default, the kind of noise removal is remove_method='auto'. See the remove_noise() documentation for more information. If remove_method is set to anything other than one of the explicit options in remove_noise, noise removal will not be carried out.
- generate_ppsds(): generates ppsds for each component, which will be combined/used later. Any parameter of obspy.signal.spectral_estimation.PPSD() may also be read into this function.
- remove_outlier_curves(): removes any outlier ppsd curves so that the data quality for when curves are combined will be enhanced. See the remove_outlier_curves() documentation for more information.
- process_hvsr(): this is the main function processing the hvsr curve and statistics. See process_hvsr() documentation for more details. The hvsr_band parameter sets the frequency spectrum over which these calculations occur.
- check_peaks(): this is the main function that will find and 'score' peaks to get a best peak. The parameter peak_freq_range can be set to limit the frequencies within which peaks are checked and scored.
- get_report(): this is the main function that will print, plot, and/or save the results of the data. See the get_report() API documentation for more information.
-- export_data(): this function exports the final data output as a pickle file (by default, this pickle object has a .hvsr extension). This can be used to read data back into SpRIT without having to reprocess data.
-
+- export_data(): this function exports the final data output as a pickle file (by default, this pickle object has a .hvsr extension). This can be used to read data back into SpRIT without having to reprocess data.
Parameters
-datapath
: str
or filepath object that can be read by obspy
+input_data
: str
or filepath object that can be read by obspy
- Filepath to data to be processed. This may be a file or directory, depending on what kind of data is being processed (this can be specified with the source parameter).
-For sample data, The following can be specified as the datapath parameter:
-- Any integer 1-6 (inclusive), or the string (e.g., datapath="1" or datapath=1 will work)
-- The word "sample" before any integer (e.g., datapath="sample1")
+For sample data, the following can be specified as the input_data parameter:
+- Any integer 1-6 (inclusive), or the string (e.g., input_data="1" or input_data=1 will work)
+- The word "sample" before any integer (e.g., input_data="sample1")
- The word "sample" will default to "sample1" if source='file'.
-- If source='batch', datapath should be datapath='sample' or datapath='batch'. In this case, it will read and process all the sample files using the HVSRBatch class. Set verbose=True to see all the information in the sample batch csv file.
+- If source='batch', input_data should be input_data='sample' or input_data='batch'. In this case, it will read and process all the sample files using the HVSRBatch class. Set verbose=True to see all the information in the sample batch csv file.
source
: str
, optional
- description, by default 'file'
+azimuth_calculation
: bool
, optional
+- Whether to perform azimuthal analysis, by default False.
+noise_removal
: bool
, default=False
+- Whether to remove noise (before processing PPSDs)
+outlier_curves_removal
: bool
, default=False
+- Whether to remove outlier curves from HVSR time windows
+show_plot
: bool
, default=True
+- Whether to show plots. This does not affect whether the plots are created (and then inserted as an attribute of HVSRData), only whether they are shown.
verbose
: bool
, optional
- description, by default False
**kwargs
- Keyword arguments for the functions listed above. The keyword arguments are unique, so they will get parsed out and passed into the appropriate function.
+input_params
: function name (not an actual parameter)
+- Function for designating input parameters for reading in and processing data
+See API documentation: input_params()
+input_data
: any
, default = '<no default>'
+- See API documentation at link above or at
help(input_params())
for specifics.
+site
: any
, default = 'HVSR Site'
+- See API documentation at link above or at
help(input_params())
for specifics.
+id_prefix
: any
, default = None
+- See API documentation at link above or at
help(input_params())
for specifics.
+network
: any
, default = 'AM'
+- See API documentation at link above or at
help(input_params())
for specifics.
+station
: any
, default = 'RAC84'
+- See API documentation at link above or at
help(input_params())
for specifics.
+loc
: any
, default = '00'
+- See API documentation at link above or at
help(input_params())
for specifics.
+channels
: any
, default = ['EHZ', 'EHN', 'EHE']
+- See API documentation at link above or at
help(input_params())
for specifics.
+acq_date
: any
, default = '2024-10-30'
+- See API documentation at link above or at
help(input_params())
for specifics.
+starttime
: any
, default = 2024-10-30T00:00:00.000000Z
+- See API documentation at link above or at
help(input_params())
for specifics.
+endtime
: any
, default = 2024-10-30T23:59:59.999999Z
+- See API documentation at link above or at
help(input_params())
for specifics.
+tzone
: any
, default = 'UTC'
+- See API documentation at link above or at
help(input_params())
for specifics.
+xcoord
: any
, default = -88.2290526
+- See API documentation at link above or at
help(input_params())
for specifics.
+ycoord
: any
, default = 40.1012122
+- See API documentation at link above or at
help(input_params())
for specifics.
+elevation
: any
, default = 755
+- See API documentation at link above or at
help(input_params())
for specifics.
+input_crs
: any
, default = None
+- See API documentation at link above or at
help(input_params())
for specifics.
+output_crs
: any
, default = None
+- See API documentation at link above or at
help(input_params())
for specifics.
+elev_unit
: any
, default = 'meters'
+- See API documentation at link above or at
help(input_params())
for specifics.
+depth
: any
, default = 0
+- See API documentation at link above or at
help(input_params())
for specifics.
+instrument
: any
, default = 'Raspberry Shake'
+- See API documentation at link above or at
help(input_params())
for specifics.
+metapath
: any
, default = None
+- See API documentation at link above or at
help(input_params())
for specifics.
+hvsr_band
: any
, default = [0.4, 40]
+- See API documentation at link above or at
help(input_params())
for specifics.
+peak_freq_range
: any
, default = [0.4, 40]
+- See API documentation at link above or at
help(input_params())
for specifics.
+processing_parameters
: any
, default = {}
+- See API documentation at link above or at
help(input_params())
for specifics.
+verbose
: any
, default = False
+- See API documentation at link above or at
help(input_params())
for specifics.
+fetch_data
: function name (not an actual parameter)
+- Fetch ambient seismic data from a source to read into obspy stream
+See API documentation: fetch_data()
+params
: any
, default = '<output
of previous function>'
+- See API documentation at link above or at
help(fetch_data())
for specifics.
+source
: any
, default = 'file'
+- See API documentation at link above or at
help(fetch_data())
for specifics.
+data_export_path
: any
, default = None
+- See API documentation at link above or at
help(fetch_data())
for specifics.
+data_export_format
: any
, default = 'mseed'
+- See API documentation at link above or at
help(fetch_data())
for specifics.
+detrend
: any
, default = 'spline'
+- See API documentation at link above or at
help(fetch_data())
for specifics.
+detrend_order
: any
, default = 2
+- See API documentation at link above or at
help(fetch_data())
for specifics.
+update_metadata
: any
, default = True
+- See API documentation at link above or at
help(fetch_data())
for specifics.
+plot_input_stream
: any
, default = False
+- See API documentation at link above or at
help(fetch_data())
for specifics.
+plot_engine
: any
, default = 'matplotlib'
+- See API documentation at link above or at
help(fetch_data())
for specifics.
+show_plot
: any
, default = True
+- See API documentation at link above or at
help(fetch_data())
for specifics.
+verbose
: any
, default = False
+- See API documentation at link above or at
help(fetch_data())
for specifics.
+kwargs
: any
, default = {}
+- See API documentation at link above or at
help(fetch_data())
for specifics.
+calculate_azimuth
: function name (not an actual parameter)
+- Function to calculate azimuthal horizontal component at specified angle(s). Adds each new horizontal
+See API documentation: calculate_azimuth()
+hvsr_data
: any
, default = '<output
of previous function>'
+- See API documentation at link above or at
help(calculate_azimuth())
for specifics.
+azimuth_angle
: any
, default = 30
+- See API documentation at link above or at
help(calculate_azimuth())
for specifics.
+azimuth_type
: any
, default = 'multiple'
+- See API documentation at link above or at
help(calculate_azimuth())
for specifics.
+azimuth_unit
: any
, default = 'degrees'
+- See API documentation at link above or at
help(calculate_azimuth())
for specifics.
+show_az_plot
: any
, default = False
+- See API documentation at link above or at
help(calculate_azimuth())
for specifics.
+verbose
: any
, default = False
+- See API documentation at link above or at
help(calculate_azimuth())
for specifics.
+plot_azimuth_kwargs
: any
, default = {}
+- See API documentation at link above or at
help(calculate_azimuth())
for specifics.
+remove_noise
: function name (not an actual parameter)
+- Function to remove noisy windows from data, using various methods.
+See API documentation: remove_noise()
+hvsr_data
: any
, default = '<output
of previous function>'
+- See API documentation at link above or at
help(remove_noise())
for specifics.
+remove_method
: any
, default = None
+- See API documentation at link above or at
help(remove_noise())
for specifics.
+processing_window
: any
, default = None
+- See API documentation at link above or at
help(remove_noise())
for specifics.
+sat_percent
: any
, default = 0.995
+- See API documentation at link above or at
help(remove_noise())
for specifics.
+noise_percent
: any
, default = 0.8
+- See API documentation at link above or at
help(remove_noise())
for specifics.
+sta
: any
, default = 2
+- See API documentation at link above or at
help(remove_noise())
for specifics.
+lta
: any
, default = 30
+- See API documentation at link above or at
help(remove_noise())
for specifics.
+stalta_thresh
: any
, default = [8, 16]
+- See API documentation at link above or at
help(remove_noise())
for specifics.
+warmup_time
: any
, default = 0
+- See API documentation at link above or at
help(remove_noise())
for specifics.
+cooldown_time
: any
, default = 0
+- See API documentation at link above or at
help(remove_noise())
for specifics.
+min_win_size
: any
, default = 1
+- See API documentation at link above or at
help(remove_noise())
for specifics.
+remove_raw_noise
: any
, default = False
+- See API documentation at link above or at
help(remove_noise())
for specifics.
+show_stalta_plot
: any
, default = False
+- See API documentation at link above or at
help(remove_noise())
for specifics.
+verbose
: any
, default = False
+- See API documentation at link above or at
help(remove_noise())
for specifics.
+generate_ppsds
: function name (not an actual parameter)
+- Generates PPSDs for each channel
+See API documentation: generate_ppsds()
+hvsr_data
: any
, default = '<output
of previous function>'
+- See API documentation at link above or at
help(generate_ppsds())
for specifics.
+azimuthal_ppsds
: any
, default = False
+- See API documentation at link above or at
help(generate_ppsds())
for specifics.
+verbose
: any
, default = False
+- See API documentation at link above or at
help(generate_ppsds())
for specifics.
+ppsd_kwargs
: any
, default = {}
+- See API documentation at link above or at
help(generate_ppsds())
for specifics.
+process_hvsr
: function name (not an actual parameter)
+- Process the input data and get HVSR data
+See API documentation: process_hvsr()
+hvsr_data
: any
, default = '<output
of previous function>'
+- See API documentation at link above or at
help(process_hvsr())
for specifics.
+horizontal_method
: any
, default = None
+- See API documentation at link above or at
help(process_hvsr())
for specifics.
+smooth
: any
, default = True
+- See API documentation at link above or at
help(process_hvsr())
for specifics.
+freq_smooth
: any
, default = 'konno ohmachi'
+- See API documentation at link above or at
help(process_hvsr())
for specifics.
+f_smooth_width
: any
, default = 40
+- See API documentation at link above or at
help(process_hvsr())
for specifics.
+resample
: any
, default = True
+- See API documentation at link above or at
help(process_hvsr())
for specifics.
+outlier_curve_rmse_percentile
: any
, default = False
+- See API documentation at link above or at
help(process_hvsr())
for specifics.
+azimuth
: any
, default = None
+- See API documentation at link above or at
help(process_hvsr())
for specifics.
+verbose
: any
, default = False
+- See API documentation at link above or at
help(process_hvsr())
for specifics.
+remove_outlier_curves
: function name (not an actual parameter)
+- Function used to remove outliers curves using Root Mean Square Error to calculate the error of each
+See API documentation: remove_outlier_curves()
+hvsr_data
: any
, default = '<output
of previous function>'
+- See API documentation at link above or at
help(remove_outlier_curves())
for specifics.
+rmse_thresh
: any
, default = 98
+- See API documentation at link above or at
help(remove_outlier_curves())
for specifics.
+use_percentile
: any
, default = True
+- See API documentation at link above or at
help(remove_outlier_curves())
for specifics.
+use_hv_curve
: any
, default = False
+- See API documentation at link above or at
help(remove_outlier_curves())
for specifics.
+plot_engine
: any
, default = 'matplotlib'
+- See API documentation at link above or at
help(remove_outlier_curves())
for specifics.
+show_plot
: any
, default = False
+- See API documentation at link above or at
help(remove_outlier_curves())
for specifics.
+verbose
: any
, default = False
+- See API documentation at link above or at
help(remove_outlier_curves())
for specifics.
+check_peaks
: function name (not an actual parameter)
+- Function to run tests on HVSR peaks to find best one and see if it passes quality checks
+See API documentation: check_peaks()
+hvsr_data
: any
, default = '<output
of previous function>'
+- See API documentation at link above or at
help(check_peaks())
for specifics.
+hvsr_band
: any
, default = [0.4, 40]
+- See API documentation at link above or at
help(check_peaks())
for specifics.
+peak_selection
: any
, default = 'max'
+- See API documentation at link above or at
help(check_peaks())
for specifics.
+peak_freq_range
: any
, default = [0.4, 40]
+- See API documentation at link above or at
help(check_peaks())
for specifics.
+azimuth
: any
, default = 'HV'
+- See API documentation at link above or at
help(check_peaks())
for specifics.
+verbose
: any
, default = False
+- See API documentation at link above or at
help(check_peaks())
for specifics.
+get_report
: function name (not an actual parameter)
+- Generate and/or print and/or export a report of the HVSR analysis in a variety of formats.
+See API documentation: get_report()
+hvsr_results
: any
, default = '<output
of previous function>'
+- See API documentation at link above or at
help(get_report())
for specifics.
+report_formats
: any
, default = ['print', 'table', 'plot', 'html', 'pdf']
+- See API documentation at link above or at
help(get_report())
for specifics.
+azimuth
: any
, default = 'HV'
+- See API documentation at link above or at
help(get_report())
for specifics.
+plot_type
: any
, default = 'HVSR p ann C+ p ann Spec p ann'
+- See API documentation at link above or at
help(get_report())
for specifics.
+plot_engine
: any
, default = 'matplotlib'
+- See API documentation at link above or at
help(get_report())
for specifics.
+show_print_report
: any
, default = True
+- See API documentation at link above or at
help(get_report())
for specifics.
+show_table_report
: any
, default = False
+- See API documentation at link above or at
help(get_report())
for specifics.
+show_plot_report
: any
, default = True
+- See API documentation at link above or at
help(get_report())
for specifics.
+show_html_report
: any
, default = False
+- See API documentation at link above or at
help(get_report())
for specifics.
+show_pdf_report
: any
, default = True
+- See API documentation at link above or at
help(get_report())
for specifics.
+suppress_report_outputs
: any
, default = False
+- See API documentation at link above or at
help(get_report())
for specifics.
+show_report_outputs
: any
, default = False
+- See API documentation at link above or at
help(get_report())
for specifics.
+csv_handling
: any
, default = 'append'
+- See API documentation at link above or at
help(get_report())
for specifics.
+report_export_format
: any
, default = None
+- See API documentation at link above or at
help(get_report())
for specifics.
+report_export_path
: any
, default = None
+- See API documentation at link above or at
help(get_report())
for specifics.
+verbose
: any
, default = False
+- See API documentation at link above or at
help(get_report())
for specifics.
+kwargs
: any
, default = {}
+- See API documentation at link above or at
help(get_report())
for specifics.
+export_data
: function name (not an actual parameter)
+- Export data into pickle format that can be read back in using import_data() so data does not need to
+See API documentation: export_data()
+hvsr_data
: any
, default = '<output
of previous function>'
+- See API documentation at link above or at
help(export_data())
for specifics.
+hvsr_export_path
: any
, default = None
+- See API documentation at link above or at
help(export_data())
for specifics.
+ext
: any
, default = 'hvsr'
+- See API documentation at link above or at
help(export_data())
for specifics.
+verbose
: any
, default = False
+- See API documentation at link above or at
help(export_data())
for specifics.
Returns
@@ -6984,256 +1146,12 @@ Raises
RuntimeError
- If the data being processed is a single file, an error will be raised if generate_ppsds() does not work correctly. No errors are raised for remove_noise() errors (since that is an optional step) and the process_hvsr() step (since that is the last processing step) .
-
-
-Expand source code
-
-def run(datapath, source='file', verbose=False, **kwargs):
- """The sprit.run() is the main function that allows you to do all your HVSR processing in one simple step (sprit.run() is how you would call it in your code, but it may also be called using sprit.sprit_hvsr.run())
-
- The datapath parameter of sprit.run() is the only required parameter. This can be either a single file, a list of files (one for each component, for example), a directory (in which case, all obspy-readable files will be added to an HVSRBatch instance), a Rasp. Shake raw data directory, or sample data.
-
- The sprit.run() function calls the following functions. This is the recommended order/set of functions to run to process HVSR using SpRIT. See the API documentation for these functions for more information:
- - input_params(): The datapath parameter of input_params() is the only required variable, though others may also need to be called for your data to process correctly.
- - fetch_data(): the source parameter of fetch_data() is the only explicit variable in the sprit.run() function aside from datapath and verbose. Everything else gets delivered to the correct function via the kwargs dictionary
- - remove_noise(): by default, the kind of noise removal is remove_method='auto'. See the remove_noise() documentation for more information. If remove_method is set to anything other than one of the explicit options in remove_noise, noise removal will not be carried out.
- - generate_ppsds(): generates ppsds for each component, which will be combined/used later. Any parameter of obspy.signal.spectral_estimation.PPSD() may also be read into this function.
- - remove_outlier_curves(): removes any outlier ppsd curves so that the data quality for when curves are combined will be enhanced. See the remove_outlier_curves() documentation for more information.
- - process_hvsr(): this is the main function processing the hvsr curve and statistics. See process_hvsr() documentation for more details. The hvsr_band parameter sets the frequency spectrum over which these calculations occur.
- - check_peaks(): this is the main function that will find and 'score' peaks to get a best peak. The parameter peak_freq_range can be set to limit the frequencies within which peaks are checked and scored.
- - get_report(): this is the main function that will print, plot, and/or save the results of the data. See the get_report() API documentation for more information.
- - export_data(): this function exports the final data output as a pickle file (by default, this pickle object has a .hvsr extension). This can be used to read data back into SpRIT without having to reprocess data.
-
- Parameters
- ----------
- datapath : str or filepath object that can be read by obspy
- Filepath to data to be processed. This may be a file or directory, depending on what kind of data is being processed (this can be specified with the source parameter).
- For sample data, The following can be specified as the datapath parameter:
- - Any integer 1-6 (inclusive), or the string (e.g., datapath="1" or datapath=1 will work)
- - The word "sample" before any integer (e.g., datapath="sample1")
- - The word "sample" will default to "sample1" if source='file'.
- - If source='batch', datapath should be datapath='sample' or datapath='batch'. In this case, it will read and process all the sample files using the HVSRBatch class. Set verbose=True to see all the information in the sample batch csv file.
- source : str, optional
- _description_, by default 'file'
- verbose : bool, optional
- _description_, by default False
- **kwargs
- Keyword arguments for the functions listed above. The keyword arguments are unique, so they will get parsed out and passed into the appropriate function.
-
- Returns
- -------
- hvsr_results : sprit.HVSRData or sprit.HVSRBatch object
- If a single file/data point is being processed, a HVSRData object will be returned. Otherwise, it will be a HVSRBatch object. See their documention for more information.
-
- Raises
- ------
- RuntimeError
- If the input parameter may not be read correctly. This is raised if the input_params() function fails. This raises an error since no other data processing or reading steps will be able to carried out correctly.
- RuntimeError
- If the data is not read/fetched correctly using fetch_data(), an error will be raised. This is raised if the fetch_data() function fails. This raises an error since no other data processing steps will be able to carried out correctly.
- RuntimeError
- If the data being processed is a single file, an error will be raised if generate_ppsds() does not work correctly. No errors are raised for remove_noise() errors (since that is an optional step) and the process_hvsr() step (since that is the last processing step) .
- """
-
- if 'hvsr_band' not in kwargs.keys():
- kwargs['hvsr_band'] = inspect.signature(input_params).parameters['hvsr_band'].default
- if 'peak_freq_range' not in kwargs.keys():
- kwargs['peak_freq_range'] = inspect.signature(input_params).parameters['peak_freq_range'].default
-
- #Get the input parameters
- input_params_kwargs = {k: v for k, v in locals()['kwargs'].items() if k in tuple(inspect.signature(input_params).parameters.keys())}
- try:
- params = input_params(datapath=datapath, verbose=verbose, **input_params_kwargs)
- except:
- #Even if batch, this is reading in data for all sites so we want to raise error, not just warn
- raise RuntimeError('Input parameters not read correctly, see sprit.input_params() function and parameters')
- #If input_params fails, initialize params as an HVSRDATA
- params = {'ProcessingStatus':{'InputParamsStatus':False, 'OverallStatus':False}}
- params.update(input_params_kwargs)
- params = sprit_utils.make_it_classy(params)
-
- #Fetch Data
- try:
- fetch_data_kwargs = {k: v for k, v in locals()['kwargs'].items() if k in tuple(inspect.signature(fetch_data).parameters.keys())}
- dataIN = fetch_data(params=params, source=source, verbose=verbose, **fetch_data_kwargs)
- except:
- #Even if batch, this is reading in data for all sites so we want to raise error, not just warn
- raise RuntimeError('Data not read correctly, see sprit.fetch_data() function and parameters for more details.')
-
- # Remove Noise
- try:
- remove_noise_kwargs = {k: v for k, v in locals()['kwargs'].items() if k in tuple(inspect.signature(remove_noise).parameters.keys())}
- data_noiseRemoved = remove_noise(hvsr_data=dataIN, verbose=verbose,**remove_noise_kwargs)
- except:
- data_noiseRemoved = dataIN
-
- #Reformat data so HVSRData and HVSRBatch data both work here
- if isinstance(data_noiseRemoved, HVSRData):
- data_noiseRemoved = {'place_holder_sitename':data_noiseRemoved}
- dataIN = {'place_holder_sitename':dataIN}
-
- for site_name in data_noiseRemoved.keys():
- data_noiseRemoved[site_name]['ProcessingStatus']['RemoveNoiseStatus']=False
- #Since noise removal is not required for data processing, check others first
- if dataIN[site_name]['ProcessingStatus']['OverallStatus']:
- data_noiseRemoved[site_name]['ProcessingStatus']['OverallStatus'] = True
- else:
- data_noiseRemoved[site_name]['ProcessingStatus']['OverallStatus'] = False
-
- #If it wasn't originally HVSRBatch, make it HVSRData object again
- if not data_noiseRemoved[site_name]['batch']:
- data_noiseRemoved = data_noiseRemoved[site_name]
-
- # Generate PPSDs
- try:
- generate_ppsds_kwargs = {k: v for k, v in locals()['kwargs'].items() if k in tuple(inspect.signature(generate_ppsds).parameters.keys())}
- PPSDkwargs = {k: v for k, v in locals()['kwargs'].items() if k in tuple(inspect.signature(PPSD).parameters.keys())}
- generate_ppsds_kwargs.update(PPSDkwargs)
- ppsd_data = generate_ppsds(hvsr_data=data_noiseRemoved, verbose=verbose,**generate_ppsds_kwargs)
- except Exception as e:
- if source == 'file' or source=='raw':
- if hasattr(e, 'message'):
- errMsg = e.message
- else:
- errMsg = e
- raise RuntimeError(f"generate_ppsds() error: {errMsg}")
-
- #Reformat data so HVSRData and HVSRBatch data both work here
- ppsd_data = data_noiseRemoved
- if isinstance(ppsd_data, HVSRData):
- ppsd_data = {'place_holder_sitename':ppsd_data}
-
- for site_name in ppsd_data.keys(): #This should work more or less the same for batch and regular data now
- ppsd_data[site_name]['ProcessingStatus']['PPSDStatus']=False
- ppsd_data[site_name]['ProcessingStatus']['OverallStatus'] = False
-
- #If it wasn't originally HVSRBatch, make it HVSRData object again
- if not ppsd_data[site_name]['batch']:
- ppsd_data = ppsd_data[site_name]
-
- # Remove Outlier Curves
- try:
- remove_outlier_curve_kwargs = {k: v for k, v in locals()['kwargs'].items() if k in tuple(inspect.signature(remove_outlier_curves).parameters.keys())}
- data_curvesRemoved = remove_outlier_curves(hvsr_data=ppsd_data, verbose=verbose,**remove_outlier_curve_kwargs)
- except Exception as e:
- traceback.print_exception(sys.exc_info()[1])
- exc_type, exc_obj, tb = sys.exc_info()
- f = tb.tb_frame
- lineno = tb.tb_lineno
- filename = f.f_code.co_filename
- errLineNo = str(traceback.extract_tb(sys.exc_info()[2])[-1].lineno)
- error_category = type(e).__name__.title().replace('error', 'Error')
- error_message = f"{e} ({errLineNo})"
- print(f"{error_category} ({errLineNo}): {error_message}")
- print(lineno, filename, f)
-
- #Reformat data so HVSRData and HVSRBatch data both work here
- data_curvesRemoved = ppsd_data
- if isinstance(data_curvesRemoved, HVSRData):
- data_curvesRemoved = {'place_holder_sitename':data_curvesRemoved}
-
- for site_name in data_curvesRemoved.keys(): #This should work more or less the same for batch and regular data now
- data_curvesRemoved[site_name]['ProcessingStatus']['RemoveOutlierCurvesStatus'] = False
- data_curvesRemoved[site_name]['ProcessingStatus']['OverallStatus'] = False
-
- #If it wasn't originally HVSRBatch, make it HVSRData object again
- if not data_curvesRemoved[site_name]['batch']:
- data_curvesRemoved = data_curvesRemoved[site_name]
-
- # Process HVSR Curves
- try:
- process_hvsr_kwargs = {k: v for k, v in locals()['kwargs'].items() if k in tuple(inspect.signature(process_hvsr).parameters.keys())}
- hvsr_results = process_hvsr(hvsr_data=ppsd_data, verbose=verbose,**process_hvsr_kwargs)
- except Exception as e:
- traceback.print_exception(sys.exc_info()[1])
- exc_type, exc_obj, tb = sys.exc_info()
- f = tb.tb_frame
- lineno = tb.tb_lineno
- filename = f.f_code.co_filename
- errLineNo = str(traceback.extract_tb(sys.exc_info()[2])[-1].lineno)
- error_category = type(e).__name__.title().replace('error', 'Error')
- error_message = f"{e} ({errLineNo})"
- print(f"{error_category} ({errLineNo}): {error_message}")
- print(lineno, filename, f)
-
- hvsr_results = ppsd_data
- if isinstance(hvsr_results, HVSRData):
- hvsr_results = {'place_holder_sitename':hvsr_results}
-
- for site_name in hvsr_results.keys(): #This should work more or less the same for batch and regular data now
-
- hvsr_results[site_name]['ProcessingStatus']['HVStatus']=False
- hvsr_results[site_name]['ProcessingStatus']['OverallStatus'] = False
-
- #If it wasn't originally HVSRBatch, make it HVSRData object again
- if not hvsr_results[site_name]['batch']:
- hvsr_results = hvsr_results[site_name]
-
- #Final post-processing/reporting
-
- # Check peaks
- check_peaks_kwargs = {k: v for k, v in locals()['kwargs'].items() if k in tuple(inspect.signature(check_peaks).parameters.keys())}
- hvsr_results = check_peaks(hvsr_data=hvsr_results, verbose=verbose, **check_peaks_kwargs)
-
- get_report_kwargs = {k: v for k, v in locals()['kwargs'].items() if k in tuple(inspect.signature(get_report).parameters.keys())}
- get_report(hvsr_results=hvsr_results, verbose=verbose, **get_report_kwargs)
-
- if verbose:
- if 'report_format' in get_report_kwargs.keys():
- #if report_format is 'print', we would have already printed it in previous step
- if get_report_kwargs['report_format']=='print' or 'print' in get_report_kwargs['report_format'] or isinstance(hvsr_results, HVSRBatch):
- #We do not need to print another report if already printed to terminal
- pass
- else:
- #We will just change the report_format kwarg to print, since we already got the originally intended report format above,
- # now need to print for verbose output
- get_report_kwargs['report_format']='print'
- get_report(hvsr_results=hvsr_results, **get_report_kwargs)
-
- if get_report_kwargs['report_format']=='plot' or 'plot' in get_report_kwargs['report_format']:
- #We do not need to plot another report if already plotted
- pass
- else:
- #hvplot_kwargs = {k: v for k, v in locals()['kwargs'].items() if k in plot_hvsr.__code__.co_varnames}
- #hvsr_results['HV_Plot'] = plot_hvsr(hvsr_results, return_fig=True, show=False, close_figs=True)
- pass
- else:
- pass
-
- #Export processed data if export_path(as pickle currently, default .hvsr extension)
- if 'export_path' in kwargs.keys():
- if kwargs['export_path'] is None:
- pass
- else:
- if 'ext' in kwargs.keys():
- ext = kwargs['ext']
- else:
- ext = 'hvsr'
- export_data(hvsr_data=hvsr_results, export_path=kwargs['export_path'], ext=ext, verbose=verbose)
-
- return hvsr_results
-
def time_it(_t, proc_name='', verbose=True)
Computes elapsed time since the last call.
-
-
-Expand source code
-
-def time_it(_t, proc_name='', verbose=True):
- """Computes elapsed time since the last call."""
- t1 = datetime.datetime.now().time()
- dt = t1 - _t
- t = _t
- if dt > 0.05:
- if verbose:
- print(f'[ELAPSED TIME] {dt:0.1f} s', flush=True)
- t = t1
- return t
-
def x_mark(incolor=False, inTerminal=False)
@@ -7241,23 +1159,6 @@ Raises
The default Windows terminal is not able to display the check mark character correctly.
This function returns another displayable character if platform is Windows
-
-
-Expand source code
-
-def x_mark(incolor=False, inTerminal=False):
- """The default Windows terminal is not able to display the check mark character correctly.
- This function returns another displayable character if platform is Windows"""
-
- if incolor:
- try:
- xmark = get_char(u'\u274C')
- except:
- xmark = get_char(u'\u2718')
- else:
- xmark = get_char(u'\u2718')
- return xmark
-
HVSRBatch is the data container used for batch processing. It contains several HVSRData objects (one for each site). These can be accessed using their site name, either square brackets (HVSRBatchVariable["SiteName"]) or the dot (HVSRBatchVariable.SiteName) accessor.
HVSRBatch is the data container used for batch processing. It contains several HVSRData objects (one for each site). These can be accessed using their site name, either square brackets (HVSRBatchVariable["SiteName"]) or the dot (HVSRBatchVariable.SiteName) accessor.
+HVSRBatch is the data container used for batch processing.
+It contains several HVSRData objects (one for each site).
+These can be accessed using their site name,
+either square brackets (HVSRBatchVariable["SiteName"]) or the dot (HVSRBatchVariable.SiteName) accessor.
The dot accessor may not work if there is a space in the site name.
All of the -functions in the sprit.pacakge are designed to perform the bulk of their operations iteratively on the individual HVSRData objects contained in the HVSRBatch object, and do little with the HVSRBatch object itself, besides using it determine which sites are contained within it.
class HVSRBatch:
- """HVSRBatch is the data container used for batch processing. It contains several HVSRData objects (one for each site). These can be accessed using their site name, either square brackets (HVSRBatchVariable["SiteName"]) or the dot (HVSRBatchVariable.SiteName) accessor.
+ """HVSRBatch is the data container used for batch processing.
+ It contains several HVSRData objects (one for each site).
+ These can be accessed using their site name,
+ either square brackets (HVSRBatchVariable["SiteName"]) or the dot (HVSRBatchVariable.SiteName) accessor.
The dot accessor may not work if there is a space in the site name.
- All of the functions in the sprit.pacakge are designed to perform the bulk of their operations iteratively on the individual HVSRData objects contained in the HVSRBatch object, and do little with the HVSRBatch object itself, besides using it determine which sites are contained within it.
+ All of the functions in the sprit package are designed to perform the bulk of their operations iteratively
+ on the individual HVSRData objects contained in the HVSRBatch object, and do little with the HVSRBatch object itself,
+ besides using it to determine which sites are contained within it.
"""
@check_instance
- def __init__(self, batch_dict, azimuth=None):
+ def __init__(self, batch_dict):
"""HVSR Batch initializer
Parameters
----------
batch_dict : dict
- Dictionary containing Key value pairs with either {sitename:HVSRData object} or {azimuth_angle_degrees:HVSRData object}
- azimuth : None or numeric, default=None
- If None, HVSRBatch object will be a batch of sites. If other value, it should be a list of numeric values of the azimuths (in degrees), by default None.
+ Dictionary containing Key value pairs with {sitename: HVSRData object}
"""
self._batch_dict = batch_dict
self.batch_dict = self._batch_dict
self.batch = True
- self.batch_type = 'sites'
- if azimuth is not None:
- self.batch_type = 'azimuths'
- if self.batch_type=='sites':
- for sitename, hvsrdata in batch_dict.items():
- setattr(self, sitename, hvsrdata)
- self[sitename]['batch']=True
- self.sites = list(self._batch_dict.keys())
- self.azimuths = azimuth # Should be None
- elif self.batch_tupe =='azimuths':
- self.azimuths = azimuth
- self.sites = []
- for az, hvsrdata in batch_dict.items():
- azkey = str(az).zfill(3)
- setattr(self, azkey, hvsrdata)
- self[azkey]['batch']=True
- self.sites.append(hvsrdata['site'])
+ for sitename, hvsrdata in batch_dict.items():
+ setattr(self, sitename, hvsrdata)
+ self[sitename]['batch'] = True
+ self.sites = list(self._batch_dict.keys())
+
#METHODS
def __to_json(self, filepath):
@@ -7332,17 +1229,17 @@ Classes
# dump the JSON string to the file
json.dump(self, f, default=lambda o: o.__dict__, sort_keys=True, indent=4)
- def export(self, export_path=True, ext='hvsr'):
+ def export(self, hvsr_export_path=True, ext='hvsr'):
"""Method to export HVSRData objects in HVSRBatch container to indivdual .hvsr pickle files.
Parameters
----------
- export_path : filepath, default=True
- Filepath to save file. Can be either directory (which will assign a filename based on the HVSRData attributes). By default True. If True, it will first try to save each file to the same directory as datapath, then if that does not work, to the current working directory, then to the user's home directory, by default True
+ hvsr_export_path : filepath, default=True
+ Filepath to save file. Can be either a file or a directory (in which case a filename will be assigned based on the HVSRData attributes). By default True. If True, it will first try to save each file to the same directory as input_data, then if that does not work, to the current working directory, then to the user's home directory, by default True
ext : str, optional
The extension to use for the output, by default 'hvsr'. This is still a pickle file that can be read with pickle.load(), but will have .hvsr extension.
"""
- export_data(hvsr_data=self, export_path=export_path, ext=ext)
+ export_data(hvsr_data=self, hvsr_export_path=hvsr_export_path, ext=ext)
def keys(self):
"""Method to return the "keys" of the HVSRBatch object. For HVSRBatch objects, these are the site names. Functions similar to dict.keys().
@@ -7380,12 +1277,19 @@ Classes
#Method wrapper of sprit.plot_hvsr function
def plot(self, **kwargs):
- """Method to plot data, based on the sprit.plot_hvsr() function. All the same kwargs and default values apply as plot_hvsr(). For return_fig, returns it to the 'Plot_Report' attribute of each HVSRData object
+ """Method to plot data, based on the sprit.plot_hvsr() function.
+
+ All the same kwargs and default values apply as plot_hvsr().
+ For return_fig, returns it to the 'Plot_Report' attribute of each HVSRData object
Returns
-------
_type_
_description_
+
+ See Also
+ --------
+ plot_hvsr
"""
for sitename in self:
if 'return_fig' in kwargs.keys() and kwargs['return_fig']:
@@ -7402,14 +1306,18 @@ Classes
-------
Variable
May return nothing, pandas.Dataframe, or pyplot Figure, depending on input.
+
+ See Also
+ --------
+ get_report
"""
- if 'report_format' in kwargs.keys():
- if 'csv' == kwargs['report_format']:
+ if 'report_formats' in kwargs.keys():
+ if 'table' == kwargs['report_formats']:
for sitename in self:
rowList = []
rowList.append(get_report(self[sitename], **kwargs))
return pd.concat(rowList, ignore_index=True)
- elif 'plot' == kwargs['report_format']:
+ elif 'plot' == kwargs['report_formats']:
plotDict = {}
for sitename in self:
if 'return_fig' in kwargs.keys() and kwargs['return_fig']:
@@ -7418,17 +1326,25 @@ Classes
get_report(self[sitename], **kwargs)
return plotDict
- #Only report_format left is print, doesn't return anything, so doesn't matter if defalut or not
+ #Only report_formats left is print, doesn't return anything, so doesn't matter if defalut or not
for sitename in self:
get_report(self[sitename], **kwargs)
return
def report(self, **kwargs):
- """Wrapper of get_report()"""
+ """Wrapper of get_report()
+
+ See Also
+ --------
+ get_report
+ """
return self.get_report(**kwargs)
def export_settings(self, site_name=None, export_settings_path='default', export_settings_type='all', include_location=False, verbose=True):
- """Method to export settings from HVSRData object in HVSRBatch object. Simply calls sprit.export_settings() from specified HVSRData object in the HVSRBatch object. See sprit.export_settings() for more details.
+ """Method to export settings from HVSRData object in HVSRBatch object.
+
+ Simply calls sprit.export_settings() from specified HVSRData object in the HVSRBatch object.
+ See sprit.export_settings() for more details.
Parameters
----------
@@ -7442,6 +1358,11 @@ Classes
Whether to include the location information in the instrument settings, if that settings type is selected, by default False
verbose : bool, optional
Whether to print output (filepath and settings) to terminal, by default True
+
+
+ See Also
+ --------
+ export_settings
"""
#If no site name selected, use first site
if site_name is None:
@@ -7471,59 +1392,27 @@ Parameters
type
: str {'shallow', 'deep'}
Based on input, creates either a shallow or deep copy of the HVSRBatch object. Shallow is equivalent of copy.copy(). Input of 'deep' is equivalent of copy.deepcopy() (still experimental). Defaults to shallow.
-
-
-Expand source code
-
-def copy(self, type='shallow'):
- """Make a copy of the HVSRBatch object. Uses python copy module.
-
- Parameters
- ----------
- type : str {'shallow', 'deep'}
- Based on input, creates either a shallow or deep copy of the HVSRBatch object. Shallow is equivalent of copy.copy(). Input of 'deep' is equivalent of copy.deepcopy() (still experimental). Defaults to shallow.
-
- """
- if type.lower()=='deep':
- return HVSRBatch(copy.deepcopy(self._batch_dict))
- else:
- return HVSRBatch(copy.copy(self._batch_dict))
-
-def export(self, export_path=True, ext='hvsr')
+def export(self, hvsr_export_path=True, ext='hvsr')
Method to export HVSRData objects in HVSRBatch container to indivdual .hvsr pickle files.
export_path
: filepath
, default=True
hvsr_export_path
: filepath
, default=True
ext
: str
, optionaldef export(self, export_path=True, ext='hvsr'):
- """Method to export HVSRData objects in HVSRBatch container to indivdual .hvsr pickle files.
-
- Parameters
- ----------
- export_path : filepath, default=True
- Filepath to save file. Can be either directory (which will assign a filename based on the HVSRData attributes). By default True. If True, it will first try to save each file to the same directory as datapath, then if that does not work, to the current working directory, then to the user's home directory, by default True
- ext : str, optional
- The extension to use for the output, by default 'hvsr'. This is still a pickle file that can be read with pickle.load(), but will have .hvsr extension.
- """
- export_data(hvsr_data=self, export_path=export_path, ext=ext)
-
def export_settings(self, site_name=None, export_settings_path='default', export_settings_type='all', include_location=False, verbose=True)
Method to export settings from HVSRData object in HVSRBatch object. Simply calls sprit.export_settings() from specified HVSRData object in the HVSRBatch object. See sprit.export_settings() for more details.
+Method to export settings from HVSRData object in HVSRBatch object.
+Simply calls sprit.export_settings() from specified HVSRData object in the HVSRBatch object. +See sprit.export_settings() for more details.
site_name
: str
, default=None
verbose
: bool
, optionaldef export_settings(self, site_name=None, export_settings_path='default', export_settings_type='all', include_location=False, verbose=True):
- """Method to export settings from HVSRData object in HVSRBatch object. Simply calls sprit.export_settings() from specified HVSRData object in the HVSRBatch object. See sprit.export_settings() for more details.
-
- Parameters
- ----------
- site_name : str, default=None
- The name of the site whose settings should be exported. If None, will default to the first site, by default None.
- export_settings_path : str, optional
- Filepath to output file. If left as 'default', will save as the default value in the resources directory. If that is not possible, will save to home directory, by default 'default'
- export_settings_type : str, {'all', 'instrument', 'processing'}, optional
- They type of settings to save, by default 'all'
- include_location : bool, optional
- Whether to include the location information in the instrument settings, if that settings type is selected, by default False
- verbose : bool, optional
- Whether to print output (filepath and settings) to terminal, by default True
- """
- #If no site name selected, use first site
- if site_name is None:
- site_name = self.sites[0]
-
- export_settings(hvsr_data=self[site_name],
- export_settings_path=export_settings_path, export_settings_type=export_settings_type, include_location=include_location, verbose=verbose)
-
def get_report(self, **kwargs)
@@ -7574,39 +1438,9 @@ Returns
Variable
- May return nothing, pandas.Dataframe, or pyplot Figure, depending on input.
-
-
-
-Expand source code
-
-def get_report(self, **kwargs):
- """Method to get report from processed data, in print, graphical, or tabular format.
-
- Returns
- -------
- Variable
- May return nothing, pandas.Dataframe, or pyplot Figure, depending on input.
- """
- if 'report_format' in kwargs.keys():
- if 'csv' == kwargs['report_format']:
- for sitename in self:
- rowList = []
- rowList.append(get_report(self[sitename], **kwargs))
- return pd.concat(rowList, ignore_index=True)
- elif 'plot' == kwargs['report_format']:
- plotDict = {}
- for sitename in self:
- if 'return_fig' in kwargs.keys() and kwargs['return_fig']:
- plotDict[sitename] = get_report(self[sitename], **kwargs)
- else:
- get_report(self[sitename], **kwargs)
- return plotDict
-
- #Only report_format left is print, doesn't return anything, so doesn't matter if defalut or not
- for sitename in self:
- get_report(self[sitename], **kwargs)
- return
-
+
+See Also
+
def items(self)
@@ -7618,20 +1452,6 @@ Returns
_type_
description
-
-
-Expand source code
-
-def items(self):
- """Method to return both the site names and the HVSRData object as a set of dict_items tuples. Functions similar to dict.items().
-
- Returns
- -------
- _type_
- _description_
- """
- return self.batch_dict.items()
-
def keys(self)
@@ -7643,65 +1463,29 @@ Returns
dict_keys
A dict_keys object listing the site names of each of the HVSRData objects contained in the HVSRBatch object
-
-
-Expand source code
-
-def keys(self):
- """Method to return the "keys" of the HVSRBatch object. For HVSRBatch objects, these are the site names. Functions similar to dict.keys().
-
- Returns
- -------
- dict_keys
- A dict_keys object listing the site names of each of the HVSRData objects contained in the HVSRBatch object
- """
- return self.batch_dict.keys()
-
def plot(self, **kwargs)
-Method to plot data, based on the sprit.plot_hvsr() function. All the same kwargs and default values apply as plot_hvsr(). For return_fig, returns it to the 'Plot_Report' attribute of each HVSRData object
+Method to plot data, based on the sprit.plot_hvsr() function.
+All the same kwargs and default values apply as plot_hvsr().
+For return_fig, returns it to the 'Plot_Report' attribute of each HVSRData object
Returns
_type_
- description
-
-
-
-Expand source code
-
-def plot(self, **kwargs):
- """Method to plot data, based on the sprit.plot_hvsr() function. All the same kwargs and default values apply as plot_hvsr(). For return_fig, returns it to the 'Plot_Report' attribute of each HVSRData object
-
- Returns
- -------
- _type_
- _description_
- """
- for sitename in self:
- if 'return_fig' in kwargs.keys() and kwargs['return_fig']:
- self[sitename]['Plot_Report'] = plot_hvsr(self[sitename], **kwargs)
- else:
- plot_hvsr(self[sitename], **kwargs)
-
- return self
-
+
+See Also
+
def report(self, **kwargs)
-Wrapper of get_report()
-
-
-Expand source code
-
-def report(self, **kwargs):
- """Wrapper of get_report()"""
- return self.get_report(**kwargs)
-
+
@@ -7710,7 +1494,8 @@ Returns
(*args, **kwargs)
-HVSRData is the basic data class of the sprit package. It contains all the processed data, input parameters, and reports.
+HVSRData is the basic data class of the sprit package.
+It contains all the processed data, input parameters, and reports.
These attributes and objects can be accessed using square brackets or the dot accessor. For example, to access the site name, HVSRData['site'] and HVSRData.site will both return the site name.
Some of the methods that work on the HVSRData object (e.g., .plot() and .get_report()) are essentially wrappers for some of the main sprit package functions (sprit.plot_hvsr() and sprit.get_report(), respectively)
@@ -7718,18 +1503,13 @@ Returns
Expand source code
class HVSRData:
- """HVSRData is the basic data class of the sprit package. It contains all the processed data, input parameters, and reports.
+ """HVSRData is the basic data class of the sprit package.
+ It contains all the processed data, input parameters, and reports.
These attributes and objects can be accessed using square brackets or the dot accessor. For example, to access the site name, HVSRData['site'] and HVSRData.site will both return the site name.
Some of the methods that work on the HVSRData object (e.g., .plot() and .get_report()) are essentially wrappers for some of the main sprit package functions (sprit.plot_hvsr() and sprit.get_report(), respectively)
"""
- #Old way of using this
- #def __new__(cls, params):
- # if isinstance(params, (cls, HVSRBatch)):
- # return params
- # return super().__new__(cls)
-
@check_instance
def __init__(self, params):
self.params = params
@@ -7765,21 +1545,21 @@ Returns
# dump the JSON string to the file
json.dump(self, f, default=unseriable_fun, sort_keys=True, indent=4)
- def export(self, export_path=None, ext='hvsr'):
+ def export(self, hvsr_export_path=None, ext='hvsr'):
"""Method to export HVSRData objects to .hvsr pickle files.
Parameters
----------
- export_path : filepath, default=True
+ hvsr_export_path : filepath, default=True
Filepath to save file. Can be either directory (which will assign a filename based on the HVSRData attributes).
By default True.
- If True, it will first try to save each file to the same directory as datapath, then if that does not work, to the current working directory, then to the user's home directory, by default True
+ If True, it will first try to save each file to the same directory as input_data, then if that does not work, to the current working directory, then to the user's home directory, by default True
ext : str, optional
The extension to use for the output, by default 'hvsr'. This is still a pickle file that can be read with pickle.load(), but will have .hvsr extension.
"""
- export_data(hvsr_data=self, export_path=export_path, ext=ext)
+ export_data(hvsr_data=self, hvsr_export_path=hvsr_export_path, ext=ext)
- #METHODS (many reflect dictionary methods)
+ # METHODS (many reflect dictionary methods)
def keys(self):
"""Method to return the "keys" of the HVSRData object. For HVSRData objects, these are the attributes and parameters of the object. Functions similar to dict.keys().
@@ -7824,6 +1604,11 @@ Returns
Returns
-------
matplotlib.Figure, matplotlib.Axis (if return_fig=True)
+
+ See Also
+ --------
+ plot_hvsr
+ plot_azimuth
"""
if 'close_figs' not in kwargs.keys():
kwargs['close_figs']=True
@@ -7838,12 +1623,21 @@ Returns
-------
Variable
May return nothing, pandas.Dataframe, or pyplot Figure, depending on input.
+
+ See Also
+ --------
+ get_report
"""
report_return = get_report(self, **kwargs)
return report_return
def report(self, **kwargs):
- """Wrapper of get_report()"""
+ """Wrapper of get_report()
+
+ See Also
+ --------
+ get_report
+ """
report_return = get_report(self, **kwargs)
return report_return
@@ -7963,7 +1757,7 @@ Returns
Instance variables
-var batch
+prop batch
-
Whether this HVSRData object is part of an HVSRBatch object. This is used throughout the code to help direct the object into the proper processing pipeline.
Returns
@@ -7987,7 +1781,7 @@ Returns
return self._batch
-var datastream
+prop datastream
-
A copy of the original obspy datastream read in. This helps to retain the original data even after processing is carried out.
Returns
@@ -8011,7 +1805,7 @@ Returns
return self._datastream
-var params
+prop params
-
Dictionary containing the parameters used to process the data
Returns
@@ -8035,7 +1829,7 @@ Returns
return self._params
-var ppsds
+prop ppsds
-
Dictionary copy of the class object obspy.signal.spectral_estimation.PPSD(). The dictionary copy allows manipulation of the data in PPSD, whereas that data cannot be easily manipulated in the original Obspy object.
Returns
@@ -8059,7 +1853,7 @@ Returns
return self._ppsds
-var ppsds_obspy
+prop ppsds_obspy
-
The original ppsd information from the obspy.signal.spectral_estimation.PPSD(), so as to keep original if copy is manipulated/changed.
@@ -8085,57 +1879,21 @@ Parameters
type
: str {'shallow', 'deep'}
- Based on input, creates either a shallow or deep copy of the HVSRData object. Shallow is equivalent of copy.copy(). Input of type='deep' is equivalent of copy.deepcopy() (still experimental). Defaults to shallow.
-
-
-Expand source code
-
-def copy(self, type='shallow'):
- """Make a copy of the HVSRData object. Uses python copy module.
-
- Parameters
- ----------
- type : str {'shallow', 'deep'}
- Based on input, creates either a shallow or deep copy of the HVSRData object. Shallow is equivalent of copy.copy(). Input of type='deep' is equivalent of copy.deepcopy() (still experimental). Defaults to shallow.
-
- """
- if type.lower()=='deep':
- return HVSRData(copy.deepcopy(self.params))
- else:
- return HVSRData(copy.copy(self.params))
-
-def export(self, export_path=None, ext='hvsr')
+def export(self, hvsr_export_path=None, ext='hvsr')
-
Method to export HVSRData objects to .hvsr pickle files.
Parameters
-export_path
: filepath
, default=True
+hvsr_export_path
: filepath
, default=True
- Filepath to save file. Can be either directory (which will assign a filename based on the HVSRData attributes).
By default True.
-If True, it will first try to save each file to the same directory as datapath, then if that does not work, to the current working directory, then to the user's home directory, by default True
+If True, it will first try to save each file to the same directory as input_data, then if that does not work, to the current working directory, then to the user's home directory, by default True
ext
: str
, optional
- The extension to use for the output, by default 'hvsr'. This is still a pickle file that can be read with pickle.load(), but will have .hvsr extension.
-
-
-Expand source code
-
-def export(self, export_path=None, ext='hvsr'):
- """Method to export HVSRData objects to .hvsr pickle files.
-
- Parameters
- ----------
- export_path : filepath, default=True
- Filepath to save file. Can be either directory (which will assign a filename based on the HVSRData attributes).
- By default True.
- If True, it will first try to save each file to the same directory as datapath, then if that does not work, to the current working directory, then to the user's home directory, by default True
- ext : str, optional
- The extension to use for the output, by default 'hvsr'. This is still a pickle file that can be read with pickle.load(), but will have .hvsr extension.
- """
- export_data(hvsr_data=self, export_path=export_path, ext=ext)
-
def export_settings(self, export_settings_path='default', export_settings_type='all', include_location=False, verbose=True)
@@ -8153,27 +1911,6 @@ Parameters
verbose
: bool
, optional
- Whether to print output (filepath and settings) to terminal, by default True
-
-
-Expand source code
-
-def export_settings(self, export_settings_path='default', export_settings_type='all', include_location=False, verbose=True):
- """Method to export settings from HVSRData object. Simply calls sprit.export_settings() from the HVSRData object. See sprit.export_settings() for more details.
-
- Parameters
- ----------
- export_settings_path : str, optional
- Filepath to output file. If left as 'default', will save as the default value in the resources directory. If that is not possible, will save to home directory, by default 'default'
- export_settings_type : str, {'all', 'instrument', 'processing'}, optional
- They type of settings to save, by default 'all'
- include_location : bool, optional
- Whether to include the location information in the instrument settings, if that settings type is selected, by default False
- verbose : bool, optional
- Whether to print output (filepath and settings) to terminal, by default True
- """
- export_settings(hvsr_data=self,
- export_settings_path=export_settings_path, export_settings_type=export_settings_type, include_location=include_location, verbose=verbose)
-
def get_report(self, **kwargs)
@@ -8184,22 +1921,9 @@ Returns
Variable
- May return nothing, pandas.Dataframe, or pyplot Figure, depending on input.
-
-
-
-Expand source code
-
-def get_report(self, **kwargs):
- """Method to get report from processed data, in print, graphical, or tabular format.
-
- Returns
- -------
- Variable
- May return nothing, pandas.Dataframe, or pyplot Figure, depending on input.
- """
- report_return = get_report(self, **kwargs)
- return report_return
-
+
+See Also
+
def items(self)
@@ -8211,20 +1935,6 @@ Returns
dict_items
- A dict_items object of the HVSRData objects attributes, parameters, etc.
-
-
-Expand source code
-
-def items(self):
- """Method to return the "items" of the HVSRData object. For HVSRData objects, this is a dict_items object with the keys and values in tuples. Functions similar to dict.items().
-
- Returns
- -------
- dict_items
- A dict_items object of the HVSRData objects attributes, parameters, etc.
- """
- return self.params.items()
-
def keys(self)
@@ -8236,24 +1946,6 @@ Returns
dict_keys
A dict_keys object of the HVSRData objects attributes, parameters, etc.
-
-
-Expand source code
-
-def keys(self):
- """Method to return the "keys" of the HVSRData object. For HVSRData objects, these are the attributes and parameters of the object. Functions similar to dict.keys().
-
- Returns
- -------
- dict_keys
- A dict_keys object of the HVSRData objects attributes, parameters, etc.
- """
- keyList = []
- for k in dir(self):
- if not k.startswith('_'):
- keyList.append(k)
- return keyList
-
def plot(self, **kwargs)
@@ -8264,39 +1956,18 @@ Returns
matplotlib.Figure, matplotlib.Axis (if return_fig=True)
-
-
-
-
-Expand source code
-
-def plot(self, **kwargs):
- """Method to plot data, wrapper of sprit.plot_hvsr()
-
- Returns
- -------
- matplotlib.Figure, matplotlib.Axis (if return_fig=True)
- """
- if 'close_figs' not in kwargs.keys():
- kwargs['close_figs']=True
- plot_return = plot_hvsr(self, **kwargs)
- plt.show()
- return plot_return
-
+
+See Also
+
def report(self, **kwargs)
-Wrapper of get_report()
-
-
-Expand source code
-
-def report(self, **kwargs):
- """Wrapper of get_report()"""
- report_return = get_report(self, **kwargs)
- return report_return
-
+
@@ -8304,7 +1975,6 @@ Returns
-
\ No newline at end of file
+