diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000..5dda736 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "pytides"] + path = pytides + url = https://github.com/clawpack/pytides.git diff --git a/pytides b/pytides new file mode 160000 index 0000000..f3317c0 --- /dev/null +++ b/pytides @@ -0,0 +1 @@ +Subproject commit f3317c04886fe4397febcfc507f42474ab5bb4ac diff --git a/pytides_examples/Tide_Module_Examples_pytides.ipynb b/pytides_examples/Tide_Module_Examples_pytides.ipynb new file mode 100644 index 0000000..2e908c5 --- /dev/null +++ b/pytides_examples/Tide_Module_Examples_pytides.ipynb @@ -0,0 +1,482 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Example of Tide Prediction For One Date Instance\n", + "\n", + "- In this example, method used to predict tide is adapated from Pytides\n", + "- This implementation will only work for known NOAA gauge stations\n", + "- Harmonic Constituents data is scraped from NOAA. \n", + "\n", + "Adapted by @rjleveque from notebook of @socoyjonathan to test local pytides and new tidetools.py." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Tide Prediction Module Functions" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + " - retrieve_constituents - retrieves harmonic constituents from NOAA gauge station\n", + " - fetch_noaa_tide_data() - retrieves datetimes, water levels and tide predictions at given NOAA tide station from NOAA's API\n", + " \n", + " - datetimes - prepares a collection of datetimes from beginning to end dates if needed\n", + " \n", + " - predict_tide() - predicts tide for desired NOAA station given station ID, start date and end date for prediction \n", + " - datum_value - retrieves datum value for desired datum reference, utilized by predict_tide to obtain MTL value\n", + " - detide() - detides observed water levels with predicted tide\n", + " - surge() - predicts surge at NOAA gauge station provided station ID, start date, end date, and landfall date, best for a Clawpack Simulation!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import matplotlib.pyplot as plt\n", + "import datetime\n", + "\n", + "#from clawpack.geoclaw import tidetools\n", + "\n", + "# Eventually move tidetools.py to geoclaw\n", + "# For development purposes, this is temporarily in this repo:\n", + "\n", + "import os, sys\n", + "pathstr = os.path.abspath('..')\n", + "if pathstr not in sys.path:\n", + " sys.path.insert(0,pathstr)\n", + "import tidetools\n", + "\n", + "print('Using tidetools from: %s' % tidetools.__file__)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### **** Station Information ****" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Locate NOAA station ID. NOAA gauge stations home: https://tidesandcurrents.noaa.gov/
\n", + "Fill in station ID, reference datum and date instance for tide prediction!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#Station Information\n", + "station_id = '8761724'\n", + "datum = 'MTL'\n", + "\n", + "#Date of prediction (YEAR, MTH, DAY, HR)\n", + "prediction_date = datetime.datetime(2005, 8, 29, 11)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Tide Prediction" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Prediction of tide at specified location (station ID) and specified time (GMT) implemented below by calling predict_tide() method with the following arguments: station_id, beg_prediction_date, end_prediction_date. Note: datum argument is optional\n", + "\n", + "
\n", + "\n", + "To predict tide at an instant, set beg_prediction_date and end_prediction_date in predict_tide() method to the same date!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#NOAA Data Scraping Implementation \n", + "height = tidetools.predict_tide(station_id, prediction_date, prediction_date, datum='MTL')\n", + "times = tidetools.datetimes(prediction_date, prediction_date) # in meters\n", + "print(height[0], \"meters\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "*******************************************************************************************************************" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Example of Tide Prediction In A Date Interval " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Station Information " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Fill in station ID, a beginning date and an end date for tide prediction below" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#Station Information\n", + "station_id = '8761724'\n", + "datum = 'MTL'\n", + "\n", + "#Beginning and End Dates \n", + "beg_date = datetime.datetime(2005, 8, 26, hour=0)\n", + "end_date = datetime.datetime(2005, 8, 31, hour=0)\n", + "\n", + "#Predict tide with arguments set as: (station_id, beg_prediction_date, end_prediction_date)\n", + "predicted_tide = tidetools.predict_tide(station_id, beg_date, end_date, datum='MTL')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Tide Predictions\n", + "Plot results in a time series plot" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#Method datetimes() makes a range of datetimes given arguments: (beg_prediction_date, end_prediction_date)\n", + "times = tidetools.datetimes(beg_date, end_date)\n", + "\n", + "plt.figure(figsize=(20,10))\n", + "plt.plot(times, predicted_tide, \"-\", label=\"Tide Prediction\")\n", + "plt.xlabel('Hours since ' + str(beg_date) + ' (GMT)')\n", + "plt.ylabel('Meters'), plt.margins(x=0), plt.legend(loc = 'best')\n", + "plt.title('My Prediction for Station {}'.format(station_id))\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "*******************************************************************************************************************" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Example Comparing NOAA vs Our Tide Prediction In A Date Interval " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#Station Information\n", + "station_id = '8761724'\n", + "datum = 'MTL'\n", + "\n", + "#Beginning and End Dates \n", + "beg_date = datetime.datetime(2005, 8, 26)\n", + "end_date = datetime.datetime(2005, 8, 31)\n", + "\n", + "#Predict Tide \n", + "predicted_tide = tidetools.predict_tide(station_id, beg_date, end_date, datum='MTL')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "- Calling function fetch_noaa_tide_data() with arguments set as (station_id, beg_prediction_date, end_prediction_date) retrieves datetimes, water levels and tide predictions for the specified NOAA station in the date interval provided from NOAA's API\n", + "- Data is scraped in Metric units, GMT timezone, MTL datum and 
6 min intervals. These arguments are optional in fetch_noaa_tide_data()." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "RJL: I'd suggest breaking the line in the next cell so it fits better when viewed in a narrow browser window. There are also a couple other places below where long lines could be split." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#Retrieve NOAA Tide Data\n", + "times, NOAA_observed_water_lvl, NOAA_predicted_tide = \\\n", + " tidetools.fetch_noaa_tide_data(station_id, beg_date, end_date, datum='MTL')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#Plot Comparisons\n", + "plt.figure(figsize=(20,10))\n", + "plt.plot(times, predicted_tide, \"-\", label=\"Our Tide Prediction\")\n", + "plt.plot(times, NOAA_predicted_tide, \"-\", label=\"NOAA Tide Prediction\")\n", + "plt.plot(times, NOAA_observed_water_lvl, \"-\", label=\"NOAA Water Level Observation\")\n", + "plt.xlabel('Hours since ' + str(beg_date) + ' (GMT)')\n", + "plt.ylabel('Metres'), plt.margins(x=0), plt.legend(loc = 'best')\n", + "plt.title('Comparison of Our Prediction vs NOAA prediction for Station {}'.format(station_id))\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "*******************************************************************************************************************" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "RJL: I think you need some explanation either here or at the start of this example of what you are showing, i.e. that you have chose and date and location where there was a bit storm surge (from what event?) so the reader knows what they are looking at. And below you might comment that the reason for detiding the observations might be to compare the surge alone with GeoClaw simulation results, for example." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Example Detiding and Capturing A Surge for a Gauge Station " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "- Calling predict_tide() method with arguments set as: (station_id, beg_prediction_date, end_prediction_date) outputs predicted tide\n", + "- Calling fetch_noaa_tide_data() with arguments set as (station_id, beg_prediction_date, end_prediction_date) retrieves datetimes, water levels and tide predictions from NOAA\n", + "- Calling detide() method with arguments set as: (NOAA observed water level, predicted tide) will output detided water level. 
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#Station Information\n", + "station_id = '8761724'\n", + "datum = 'MTL'\n", + "\n", + "#Beginning and End Dates \n", + "beg_date = datetime.datetime(2005, 8, 26)\n", + "end_date = datetime.datetime(2005, 8, 31)\n", + "\n", + "predicted_tide = tidetools.predict_tide(station_id, beg_date, end_date)\n", + "times, NOAA_observed_water_lvl, NOAA_predicted_tide = \\\n", + " tidetools.fetch_noaa_tide_data(station_id, beg_date, end_date, datum='MTL')\n", + "\n", + "surge = tidetools.detide(NOAA_observed_water_lvl, predicted_tide)\n", + "\n", + "#Plot Comparisons\n", + "plt.figure(figsize=(20,10))\n", + "plt.plot(times, surge, \"-\", label=\"Our Surge Prediction\")\n", + "plt.xlabel('Hours since ' + str(beg_date) + ' (GMT)')\n", + "plt.ylabel('Metres'), plt.margins(x=0), plt.legend(loc = 'best')\n", + "plt.title('Detided Water Level for Station {}'.format(station_id))\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "*******************************************************************************************************************" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Example for Clawpack Storm Surge Implementation" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "RJL: It's not clear what you mean here by \"storm surge implementation\". It seems like you are talking about comparing the NOAA gauges results with the surge simulated by GeoClaw, but I don't think you are showing any GeoClaw results in this notebook. This might not be clear to a reader, and when you label some curves with e.g. \"our detided prediction\" they might think that's the GeoClaw simulated surge, whereas I think you mean the NOAA observations minus the predicted tide that you computed from the constituents using the new `tide.py` (as opposed to the NOAA predictions downloaded from the webpage)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "- Code below works best if placed in gauge_afteraxes( ) in setplot.py for a storm simulation.\n", + "- Calling surge() method with arguments set as: (station_id, beginning_date, end_date, landfall_date) will output storm surge from NOAA observed water levels and predicted tide." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#Station Information\n", + "station_id = '8761724'\n", + "\n", + "#Beginning, End, Landfall Dates\n", + "beg_date = datetime.datetime(2005, 8, 26)\n", + "end_date = datetime.datetime(2005, 8, 31)\n", + "landfall_date = datetime.datetime(2005, 8, 29, 11, 10)\n", + "\n", + "# Surge Prediction\n", + "times, surge = tidetools.surge(station_id, beg_date, end_date, landfall_date)\n", + "plt.plot(times, surge, color=\"b\", label=\"Our Prediction\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "*******************************************************************************************************************" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Example Iterating Through A Library Of Stations And Date Intervals" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "RJL: Again you might add some more discussion here, saying that these particular stations and dates correspond to particular surge events (which is sort of clear from the comments such as `#katrina` in the cell below, but could be explained more, perhaps with links to NOAA pages or elsewhere describing these events?)\n", + "\n", + "RJL: Also, in a lot of figures below it looks like the observations were pretty far from the predicted tides even before the big surge arrived, is that because the storm was already having a significant effect (and hence this deviation from predictions should be preserved in the de-tided version that you will compare to GeoClaw results, since it should also be seen in the simulations presumably) or is this because the predictions are poor for some other reason? It would be valuable to explain this a bit more. In the case of tsunamis, often nothing unusual happens until the first big wave arrives, but surge is probably different." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "station_dict = {'8761724': ('Grand Isle, LA', (2005, 8, 26), (2005, 8, 31), (2005, 8, 29, 11, 10)), #katrina\n", + " '8760922': ('Pilots Station East, SW Pass, LA', (2005, 8, 26), (2005, 8, 31), (2005, 8, 29, 11)), #michael\n", + " '8658120': ('Wilmington, NC', (2016, 10, 6, 12), (2016, 10, 9, 12), (2016, 10, 8, 12)), #matthew\n", + " '8721604': ('Trident Pier, Port Canaveral, FL', (2019, 8, 24), (2019, 9, 9), (2019, 9, 4, 12)), #dorian\n", + " '8723970': ('Vaca Key, Florida Bay, FL', (2017, 9, 6, 13), (2017, 9, 12, 13), (2017, 9, 10, 13)) #irma\n", + " }\n", + "\n", + "for (key, value) in station_dict.items():\n", + " station_id = key\n", + " station_name = value[0]\n", + " beg_date = datetime.datetime(*value[1])\n", + " end_date = datetime.datetime(*value[2])\n", + " landfall_date = datetime.datetime(*value[3])\n", + " \n", + " #NOAA Data Scraping Implementation\n", + " predicted_tide = tidetools.predict_tide(station_id, beg_date, end_date) \n", + " \n", + " times, NOAA_observed_water_lvl, NOAA_predicted_tide = \\\n", + " tidetools.fetch_noaa_tide_data(station_id, beg_date, end_date, datum='MTL')\n", + "\n", + " #Detide Water Level\n", + " surge = tidetools.detide(NOAA_observed_water_lvl, predicted_tide)\n", + " NOAA_surge = tidetools.detide(NOAA_observed_water_lvl, NOAA_predicted_tide)\n", + " \n", + " #Plot Comparisons\n", + " plt.figure(figsize=(20,10))\n", + " plt.plot(times, predicted_tide, \"-\", label=\"Our Tide Prediction\")\n", + " plt.plot(times, NOAA_predicted_tide, \"-\", label=\"NOAA Tide Prediction\")\n", + " plt.plot(times, NOAA_observed_water_lvl, \"-\", label=\"NOAA Water Level Observation\")\n", + " plt.xlabel('Hours since ' + str(beg_date) + ' (GMT)')\n", + " plt.ylabel('Metres'), plt.margins(x=0), plt.legend(loc = 'best')\n", + " plt.title('Comparison of Our Prediction vs NOAA prediction for Station {}, {}'.format(station_id, station_name))\n", + " plt.show()\n", + " \n", + " #Detided Water Level Comparison\n", + " plt.figure(figsize=(20,10))\n", + " plt.plot(times, surge, \"-\", label=\"Our Detided Prediction\")\n", + " plt.plot(times, NOAA_surge, \"-\", label=\"NOAA's Detided Prediction\")\n", + " plt.xlabel('Hours since ' + str(beg_date) + ' (GMT)')\n", + " plt.ylabel('Metres'), plt.margins(x=0), plt.legend(loc = 'best')\n", + " plt.title('Detided Water Level Comparison of Our Prediction vs NOAA prediction for Station {}, {}'.format(station_id, station_name))\n", + " plt.show()\n", + " \n", + " \n", + " #### Clawpack Implementation (in setplot.py) ####\n", + " times, surge = tidetools.surge(station_id, beg_date, end_date, landfall_date)\n", + " plt.plot(times, surge, color=\"b\", label=\"Our Surge Prediction\")\n", + " \n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.10" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/pytides_examples/demo_noaa_constituents.ipynb b/pytides_examples/demo_noaa_constituents.ipynb new file mode 100644 index 0000000..4bf8060 --- /dev/null +++ 
b/pytides_examples/demo_noaa_constituents.ipynb @@ -0,0 +1,461 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# demo_noaa_constituents -- Example Pytides Usage\n", + "\n", + "Based loosely on the example by Sam Cox on the Pytides wiki, [How to use the NOAA's published Harmonic Constituents in Python with Pytides](https://github.com/sam-cox/pytides/wiki/How-to-use-the-NOAA's-published-Harmonic-Constituents-in-Python-with-Pytides).\n", + "\n", + "This example uses the [NOAA constituents published for King's Point, NY](https://tidesandcurrents.noaa.gov/harcon.html?unit=0&timezone=0&id=8516945&name=Kings+Point&state=NY). These have been typed into this notebook below. \n", + "\n", + "Code at the bottom of this notebook illustrates how to download the harmonic consituents directly, a better approach in general since it is easier, less error-prone, and provides more digits of precision than the published tables.\n", + "\n", + "To find the station number and the webpages for other stations, see [this NOAA page](https://tidesandcurrents.noaa.gov/stations.html?type=Water%20Levels). After going to one of the station webpages, you will find links to the harmonic constituents and datums near the bottom of the page. Make sure you select \"meters\" as the units and the desired time zone (local or GMT) and then refresh the page if necessary.\n", + "\n", + "For more about tidal constituents, see for example:\n", + " - [wikipedia page](https://en.wikipedia.org/wiki/Theory_of_tides#Tidal_constituents)\n", + " - [NOAA page](https://tidesandcurrents.noaa.gov/about_harmonic_constituents.html)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from datetime import datetime\n", + "import matplotlib.pyplot as plt\n", + "import numpy as np" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Import local submodule version of Pytides\n", + "\n", + "The clawpack version includes some fixes to the original code needed to make it work in Python3." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Here's what we'd like to do: (?)\n", + "#from clawpack.pytides.tide import Tide\n", + "#import clawpack.pytides.constituent as cons\n", + "#\n", + "\n", + "# For now, hardwire in the path...\n", + "import sys, os\n", + "CLAW = os.environ['CLAW'] # path to Clawpack files\n", + "pathstr = os.path.join(CLAW, 'tidal-examples/pytides')\n", + "assert os.path.isdir(pathstr), '*** Need clawpack/tidal-examples/pytides ***'\n", + "print('Using Pytides from: %s' % pathstr)\n", + "if pathstr not in sys.path:\n", + " sys.path.insert(0,pathstr)\n", + " \n", + "from pytides.tide import Tide\n", + "import pytides.constituent as cons" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Here are the NOAA constituents, in the order presented on their website for this particular station.\n", + "\n", + "We omit the Z0 component, which is 0 relative to MSL and will be adjusted below to present results relative to a different datum, e.g. MLLW." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "constituents = [c for c in cons.noaa if c != cons._Z0]\n", + "\n", + "#Phases and amplitudes (relative to GMT and in degrees and metres)\n", + "published_phases = [115.7,140.7,92.6,192,145.5,220.6,159.9,202.8,152.3,\\\n", + " 117.2,92,0,0,69.7,224.5,141.7,121.9,\\\n", + " 228.4,252.1,0,60.1,135.5,0,0,204.5,212.2,112.3,\\\n", + " 141.8,249.1,211.1,75.1,181.4,140.4,202.4,141.8,155,160.9]\n", + "\n", + "published_amplitudes = [1.142,0.189,0.241,0.1,0.036,0.066,0.08,0.01,0.004,\\\n", + " 0.022,0.052,0,0,0.03,0.007,0.025,0.009,\\\n", + " 0.005,0.008,0,0.024,0.065,0,0,0.004,0.017,0.015,\\\n", + " 0.002,0.002,0.032,0.003,0.007,0.07,0.009,0.053,\\\n", + " 0.007,0.008]\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Print out the constintuents for easy comparison with the [NOAA constituents page for station 8516945, Kings Point, NY](https://tidesandcurrents.noaa.gov/harcon.html?unit=0&timezone=0&id=8516945&name=Kings+Point&state=NY). Note that some of the names are slightly different from the NOAA names:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print('# Name Amplitude Phase')\n", + "for k,c in enumerate(constituents):\n", + " print('%s %s %.3f %7.3f' \\\n", + " % (str(k+1).ljust(4), c.name.ljust(7), published_amplitudes[k], published_phases[k]))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can add a constant offset. The published constituents are relative to MSL (not MTL as stated in the Pytides wiki example). Here we set the offset so that the plots will be relative to MLLW instead. These values can be found on the [NOAA datums page for this station](https://tidesandcurrents.noaa.gov/datums.html?datum=STND&units=1&epoch=0&id=8516945&name=Kings+Point&state=NY). Note that these values are relative to the station datum (STND) although the offset computed should be the same as long as the values of both `MSL` and `MLLW` used are relative to the same datum. Also make sure `meters` is selected when looking at datums (and at constituents)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "MSL = 5.113\n", + "MLLW = 3.927\n", + "offset = MSL - MLLW\n", + "constituents.append(cons._Z0)\n", + "published_phases.append(0)\n", + "published_amplitudes.append(offset)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Build the model, and a tide instance:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert(len(constituents)==len(published_phases)==len(published_amplitudes))\n", + "model = np.zeros(len(constituents), dtype = Tide.dtype)\n", + "model['constituent'] = constituents\n", + "model['amplitude'] = published_amplitudes\n", + "model['phase'] = published_phases\n", + "\n", + "#Build a TIDE INSTANCE called tide from the MODEL called model\n", + "tide = Tide(model=model,radians=False)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print('Predicted tide on January 1, 2013 relative to MLLW...')\n", + "print(' at 00:00 GMT: %.3fm\\n at 06:00 GMT: %.3fm' \\\n", + " % tuple(tide.at([datetime(2013,1,1,0,0,0), datetime(2013,1,1,6,0,0)])))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "The [actual NOAA prediction](https://tidesandcurrents.noaa.gov/waterlevels.html?id=8516945&units=metric&bdate=20130101&edate=20130102&timezone=GMT&datum=MLLW&interval=6&action=) for 0000 and 0600 GMT on January 1 2013 are -0.079m and 2.206m relative to MLLW." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Produce plots over a time range:\n", + "\n", + "We can produce plots similar to the [actual NOAA prediction](https://tidesandcurrents.noaa.gov/waterlevels.html?id=8516945&units=metric&bdate=20130101&edate=20130102&timezone=GMT&datum=MLLW&interval=6&action=) over a couple of days." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "prediction_t0 = datetime(2013,1,1,0,0,0)\n", + "prediction_end= datetime(2013,1,3,0,0,0)\n", + "hrs=((prediction_end - prediction_t0).total_seconds())/3600.\n", + "print ('The data started at datetime: ',prediction_t0)\n", + "print ('The data ended at datetime: ',prediction_end)\n", + "print ('The data spanned %5i hours' %int(hrs))\n", + "print (' ')\n", + "hours = 0.1*np.arange(int(hrs)*10)\n", + "times = Tide._times(prediction_t0, hours)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Evaluate the tide instance at the specified times" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "my_prediction = tide.at(times)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Find the high tides and low tides, and print out a tide table:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Find the highs and lows using tide.extrema generator function\n", + "# of the tidal instance called tide. 
Save ext_hrs and ext_hts\n", + "# as numpy arrays for plotting later.\n", + "\n", + "ext_vals=[t for t in tide.extrema(prediction_t0,prediction_end)]\n", + "print ('High and Low tides, relative to MLLW: ')\n", + "\n", + "n_ext=len(ext_vals)\n", + "ext_hts=[]; ext_hilo=[]; ext_hrs=[]; ext_datetimes=[];\n", + "for i in range(n_ext):\n", + " ext_tuple=ext_vals[i]\n", + " ext_datetimes.append(ext_tuple[0])\n", + " ext_hts.append(ext_tuple[1])\n", + " ext_hilo.append(ext_tuple[2])\n", + " ext_hrs.append( ((ext_tuple[0] - prediction_t0).total_seconds())/3600.)\n", + "ext_hrs=np.array(ext_hrs)\n", + "ext_hts=np.array(ext_hts)\n", + "\n", + "#Print the extrema information\n", + "print (' ')\n", + "print (' Date time Hrs Elevation Hi-Low ') \n", + "for i in range(n_ext):\n", + " print ('%s %8.3f %8.3f m %8s ' %\\\n", + " (ext_datetimes[i].strftime('%Y-%m-%d %H:%M:%S'), ext_hrs[i],ext_hts[i],ext_hilo[i]) )" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Plot the predicted tide" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "titlestr = 'January 1-2, 2013 Example Tides \\n' +\\\n", + " 'Kings Point, NY (Station 8516945)'\n", + "\n", + "plt.figure(figsize=(13,6))\n", + "plt.plot(hours, my_prediction, label=\"The data (38 NOAA)\")\n", + "plt.plot(ext_hrs,ext_hts,'ro',label=\"Extrema\")\n", + "plt.xticks(np.arange(0,49,12))\n", + "plt.xlabel('Hours since ' + str(prediction_t0) + '(GMT)')\n", + "plt.ylabel('Meters above MLLW')\n", + "plt.axis([-1,49,-1,4])\n", + "plt.legend(loc='upper left')\n", + "plt.grid(True)\n", + "plt.title(titlestr);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Download harmonic consituents\n", + "\n", + "Rather than typing in the harmonic constituents as done above, it is much easier and less prone to error to download them directly from the NOAA website. The code below should produce the same constituents as used above.\n", + "\n", + "We use a function in the GeoClaw `tidetools` module to do this. Eventually this will be moved to geoclaw, but is local to this repository for development purposes..." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "pathstr = os.path.abspath('..')\n", + "if pathstr not in sys.path:\n", + " sys.path.insert(0,pathstr)\n", + "import tidetools\n", + "\n", + "print('Using tidetools from: %s' % tidetools.__file__)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "station = 8516945 # Kings point, NY\n", + "print('Fetching harmonic constituents for station %s, standard 37 -- no Z0' % station)\n", + "harcon, harcon_info = tidetools.fetch_harcon(station, units='meters', verbose=False)\n", + "\n", + "numbers = list(range(1,38)) \n", + "harcon_numbers = [h['number'] for h in harcon]\n", + "# make sure there are the expected number and in the right order:\n", + "assert harcon_numbers == numbers, \\\n", + " '*** unexpected harcon_numbers = %s' % numbers" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note that `harcon` is now a dictionary with keys such as `number`, `name`, `amplitude`, etc.\n", + " \n", + "Print out these constituents, same as above but with names that agree with the [NOAA page](https://tidesandcurrents.noaa.gov/harcon.html?unit=0&timezone=0&id=8516945&name=Kings+Point&state=NY), and with more digits of precision in the amplitudes:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print('# Name Amplitude Phase')\n", + "for k,h in enumerate(harcon):\n", + " print('%s %s %.5f %9.4f' \\\n", + " % (str(h['number']).ljust(4), h['name'].ljust(7), h['amplitude'], h['phase_GMT']))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Note that: harcon_info['units'] = %s\" % harcon_info['units'])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "But our `tidetools` function converted the units to meters in computing `harcon`, since that's what we requested above." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Translate harcon into a pytides model:\n", + "\n", + "We can translate the `harcon` dictionary into the model needed by pytides as follows:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "NOAA_constituents = [c for c in cons.noaa if c != cons._Z0]\n", + "\n", + "#Set the amplitudes and phases lists that will be needed for Pytides\n", + "NOAA_amplitudes = [h['amplitude'] for h in harcon] #in meters\n", + "\n", + "#These are relative to GMT (0 deg West time meridan)\n", + "NOAA_phases_GMT = [h['phase_GMT'] for h in harcon] \n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "MSL = 5.113\n", + "MLLW = 3.928\n", + "offset = MSL - MLLW\n", + "NOAA_constituents.append(cons._Z0)\n", + "NOAA_phases_GMT.append(0)\n", + "NOAA_amplitudes.append(offset)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert(len(constituents) == len(NOAA_phases_GMT) \\\n", + " == len(NOAA_amplitudes))\n", + "NOAA_model = np.zeros(len(NOAA_constituents), dtype = Tide.dtype)\n", + "NOAA_model['constituent'] = NOAA_constituents\n", + "NOAA_model['amplitude'] = NOAA_amplitudes\n", + "NOAA_model['phase'] = NOAA_phases_GMT\n", + "\n", + "#Build a TIDE INSTANCE called tide from the MODEL called model\n", + "NOAA_tide = Tide(model=NOAA_model,radians=False)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print('Predicted tide on January 1, 2013 relative to MLLW...')\n", + "print(' at 00:00 GMT: %.3fm\\n at 06:00 GMT: %.3fm' \\\n", + " % tuple(NOAA_tide.at([datetime(2013,1,1,0,0,0), datetime(2013,1,1,6,0,0)])))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This is slightly different from the values obtained earlier because `harcon` has more digits in the constituents than what appears on the NOAA website and at the top of the notebook." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.10" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/pytides_examples/pytides_tools.py b/pytides_examples/pytides_tools.py new file mode 100644 index 0000000..79bb89b --- /dev/null +++ b/pytides_examples/pytides_tools.py @@ -0,0 +1,51 @@ +""" +Functions to facilitate using PyTides. +""" + +import sys, os + +# Temporary fix to import local submodule version of pytides: +CLAW = os.environ['CLAW'] # path to Clawpack files +pathstr = os.path.join(CLAW, 'tidal-examples/pytides') +assert os.path.isdir(pathstr), '*** clawpack/tidal-examples/pytides ***' +print('pytides_tools is using Pytides from: %s' % pathstr) +if pathstr not in sys.path: + sys.path.insert(0,pathstr) + +from pytides.tide import Tide +import numpy as np + +def new_tide_instance_from_existing(constit_list,existing_tide_instance): + """ + constit_list is the list of constituents to be used in the + new_tide_instance. + The values of the amplitudes and phases for each of them is to be + pulled from an existing_tide_instance. 
If no such constituent is in
+    the existing_tide_instance, an error message is printed.
+    """
+    existing_constits = existing_tide_instance.model['constituent']
+    existing_amps = existing_tide_instance.model['amplitude']
+    existing_phases = existing_tide_instance.model['phase']
+    len_existing = len(existing_constits)
+    new_model = np.zeros(len(constit_list), dtype = Tide.dtype)
+    new_constits=[]; new_amps=[]; new_phases=[];
+    for ic in constit_list:
+        success = False
+        for j in range(len_existing):
+            ie = existing_constits[j]
+            if (ie.name == ic.name):   #grab it
+                success = True
+                new_constits.append(ie)
+                new_amps.append(existing_amps[j])
+                new_phases.append(existing_phases[j])
+        if (success == False):
+            print ('Did not find constituent name: ',ic.name,\
+                   'in existing tide instance')
+    new_model['constituent'] = new_constits
+    new_model['amplitude'] = new_amps
+    new_model['phase'] = new_phases
+
+    # The new_model is now complete, so make a tide instance
+    # called new_tide from it.
+    new_tide = Tide(model = new_model, radians = False)
+    return new_tide
diff --git a/tidetools.py b/tidetools.py
new file mode 100644
index 0000000..424af66
--- /dev/null
+++ b/tidetools.py
@@ -0,0 +1,615 @@
+#!/usr/bin/env python
+
+"""
+EVENTUALLY MOVE THIS TO GEOCLAW!
+
+GeoClaw util Module `$CLAW/geoclaw/src/python/geoclaw/tidetools.py`
+
+Module provides tide prediction functions.
+
+:Functions:
+
+  - retrieve_constituents - retrieves harmonic constituents from NOAA gauge station
+  - fetch_noaa_tide_data - retrieves observed water levels and tide predictions from NOAA's API
+  - datum_value - retrieves datum value for desired datum reference
+  - predict_tide - predicts tide with Pytides
+  - datetimes - prepares a collection of datetimes from beginning to end dates
+  - detide - detides observed water levels
+  - surge - predicts surge at NOAA gauge station
+  - fetch_datums - retrieves datums for a CO-OPS station
+  - fetch_harcon - retrieves harmonic constituents for a CO-OPS station
+  - make_pytides_model - builds the amplitude and phase lists needed for a pytides model
+"""
+
+from __future__ import absolute_import
+from __future__ import print_function
+from collections.abc import Iterable
+from collections import OrderedDict, namedtuple
+from scipy.optimize import leastsq, fsolve
+from itertools import takewhile, count
+from datetime import datetime, timedelta
+from functools import reduce
+from six.moves.urllib.parse import urlencode
+from six.moves.urllib.request import urlopen
+
+try:
+    from itertools import izip, ifilter
+except ImportError: #Python3
+    izip = zip
+    ifilter = filter
+
+try:
+    import requests
+    import json
+    import string
+    import lxml.html as lh
+    import pandas as pd
+    import operator as op
+    import numpy as np
+    import io
+    import os
+    import os.path
+except ImportError as e:
+    print(e)
+
+# Here's what we'd like to do: (?)
+#from clawpack.pytides.tide import Tide
+#import clawpack.pytides.constituent as cons
+#
+
+# For now, hardwire in the path...
+import sys, os
+CLAW = os.environ['CLAW']  # path to Clawpack files
+pathstr = os.path.join(CLAW, 'tidal-examples/pytides')
+assert os.path.isdir(pathstr), '*** Need clawpack/tidal-examples/pytides ***'
+print('Using Pytides from: %s' % pathstr)
+if pathstr not in sys.path:
+    sys.path.insert(0,pathstr)
+
+from pytides.tide import Tide
+
+
+
+d2r, r2d = np.pi/180.0, 180.0/np.pi
+
+NOAA_API_URL = 'https://tidesandcurrents.noaa.gov/api/datagetter'
+NOAA_home = 'https://tidesandcurrents.noaa.gov/harcon.html'
+
+######################## Tide Prediction Functions ########################
+
+def retrieve_constituents(station, time_zone='GMT', units='meters', cache_dir=None,
+                          verbose=True):
+
+    """Fetch constituent data for given NOAA tide station.
+    By default, retrieved data is cached in the geoclaw scratch directory
+    located at:
+        $CLAW/geoclaw/scratch
+    :Required Arguments:
+      - station (string): 7 character station ID
+    :Optional Arguments:
+      - time_zone (string): see NOAA API documentation for possible values
+      - units (string): see NOAA API documentation for possible values
+      - cache_dir (string): alternative directory to use for caching data
+      - verbose (bool): whether to output informational messages
+    :Returns:
+      - constituent_dict (dictionary): dictionary of tidal constituents for NOAA gauge station
+    """
+
+    def units_num(units):
+        if (units == 'meters'):
+            return 0
+        elif (units == 'feet'):
+            return 1
+
+    def time_zone_num(time_zone):
+        if (time_zone == 'GMT'):
+            return 0
+        elif (time_zone == 'Local'):
+            return 1
+
+    def get_noaa_params(station, time_zone, units):
+        noaa_params = {
+            'unit': units_num(units),
+            'timezone': time_zone_num(time_zone),
+            'id': station
+        }
+        return noaa_params
+
+    # use geoclaw scratch directory for caching by default
+    if cache_dir is None:
+        if 'CLAW' not in os.environ:
+            raise ValueError('CLAW environment variable not set')
+        claw_dir = os.environ['CLAW']
+        cache_dir = os.path.join(claw_dir, 'geoclaw', 'scratch')  #### cache_dir
+
+    def get_cache_path(station, time_zone, units):
+        filename = '{}_{}_constituents'.format(time_zone, units)
+        abs_cache_dir = os.path.abspath(cache_dir)
+        return os.path.join(abs_cache_dir, 'constituents', station, filename)
+
+    def save_to_cache(cache_path, data):
+        # make parent directories if they do not exist
+        parent_dir = os.path.dirname(cache_path)
+        if not os.path.exists(parent_dir):
+            os.makedirs(parent_dir)
+
+        component_array = pd.DataFrame(data)
+        component_array.to_csv(cache_path, index=False)
+
+    def parse(cache_path):
+        # read data into structured array, skipping header row if present
+        data = pd.read_csv(cache_path)
+        component_array = pd.DataFrame(data)
+        component_dict = component_array.to_dict(orient='list')
+        return component_dict
+
+    #Requests URL
+    def fetch_data(station, time_zone, units):
+        noaa_params = get_noaa_params(station, time_zone, units)
+        cache_path = get_cache_path(station, time_zone, units)
+
+        # use cached data if available
+        if os.path.exists(cache_path):
+            if verbose:
+                print('Using cached constituent data for station {}'.format(station))
+            return parse(cache_path)
+
+
+        # otherwise, retrieve data from NOAA and cache it
+        if verbose:
+            print('Fetching constituent data from NOAA for station {}'.format(station))
+
+        #Forms URL
+        url = '{}?{}'.format(NOAA_home, urlencode(noaa_params))
+
+        page = requests.get(url)
+        doc = lh.fromstring(page.content)
+        tr_elements = doc.xpath('//tr')
+        col = [((t.text_content(),[])) for t in tr_elements[0]]
+        for j in range(1, len(tr_elements)):
+            T, i = 
tr_elements[j], 0 + for t in T.iterchildren(): + col[i][1].append(t.text_content()) + i+=1 + + constituent_dict = {title:column for (title,column) in col} + + # if there were no errors, then cache response + save_to_cache(cache_path, constituent_dict) + + return constituent_dict + + + try: + constituents = fetch_data(station, time_zone, units) + + except: + print('*** Fetching NOAA Constituents failed, returning None') + constituents = None + + return constituents + + +def fetch_noaa_tide_data(station, begin_date, end_date, datum='MTL', time_zone='GMT', units='metric', cache_dir=None, verbose=True): + """Fetch water levels and tide predictions at given NOAA tide station. + The data is returned in 6 minute intervals between the specified begin and + end dates/times. A complete specification of the NOAA CO-OPS API for Data + Retrieval used to fetch the data can be found at: + https://tidesandcurrents.noaa.gov/api/ + By default, retrieved data is cached in the geoclaw scratch directory + located at: + $CLAW/geoclaw/scratch + :Required Arguments: + - station (string): 7 character station ID + - begin_date (datetime): start of date/time range of retrieval + - end_date (datetime): end of date/time range of retrieval + :Optional Arguments: + - datum (string): see NOAA API documentation for possible values + - time_zone (string): see NOAA API documentation for possible values + - units (string): see NOAA API documentation for possible values + - cache_dir (string): alternative directory to use for caching data + - verbose (bool): whether to output informational messages + :Returns: + - date_time (numpy.ndarray): times corresponding to retrieved data + - water_level (numpy.ndarray): preliminary or verified water levels + - prediction (numpy.ndarray): tide predictions + """ + # use geoclaw scratch directory for caching by default + if cache_dir is None: + if 'CLAW' not in os.environ: + raise ValueError('CLAW environment variable not set') + claw_dir = os.environ['CLAW'] + cache_dir = os.path.join(claw_dir, 'geoclaw', 'scratch') + + def fetch(product, expected_header, col_idx, col_types): + noaa_params = get_noaa_params(product) + cache_path = get_cache_path(product) + + # use cached data if available + if os.path.exists(cache_path): + if verbose: + print('Using cached {} data for station {}'.format( + product, station)) + return parse(cache_path, col_idx, col_types, header=True) + + # otherwise, retrieve data from NOAA and cache it + if verbose: + print('Fetching {} data from NOAA for station {}'.format( + product, station)) + full_url = '{}?{}'.format(NOAA_API_URL, urlencode(noaa_params)) + with urlopen(full_url) as response: + text = response.read().decode('utf-8') + with io.StringIO(text) as data: + # ensure that received header is correct + header = data.readline().strip() + if header != expected_header or 'Error' in text: + # if not, response contains error message + raise ValueError(text) + + # if there were no errors, then cache response + save_to_cache(cache_path, text) + + return parse(data, col_idx, col_types, header=False) + + def get_noaa_params(product): + noaa_date_fmt = '%Y%m%d %H:%M' + noaa_params = { + 'product': product, + 'application': 'Clawpack', + 'format': 'csv', + 'station': station, + 'begin_date': begin_date.strftime(noaa_date_fmt), + 'end_date': end_date.strftime(noaa_date_fmt), + 'time_zone': time_zone, + 'datum': datum, + 'units': units + } + return noaa_params + + def get_cache_path(product): + cache_date_fmt = '%Y%m%d%H%M' + dates = 
'{}_{}'.format(begin_date.strftime(cache_date_fmt), + end_date.strftime(cache_date_fmt)) + filename = '{}_{}_{}'.format(time_zone, datum, units) + abs_cache_dir = os.path.abspath(cache_dir) + return os.path.join(abs_cache_dir, product, station, dates, filename) + + def save_to_cache(cache_path, data): + # make parent directories if they do not exist + parent_dir = os.path.dirname(cache_path) + if not os.path.exists(parent_dir): + os.makedirs(parent_dir) + + # write data to cache file + with open(cache_path, 'w') as cache_file: + cache_file.write(data) + + def parse(data, col_idx, col_types, header): + # read data into structured array, skipping header row if present + a = np.genfromtxt(data, usecols=col_idx, dtype=col_types, + skip_header=int(header), delimiter=',', + missing_values='') + + # return tuple of columns + return tuple(a[col] for col in a.dtype.names) + + # only need first two columns of data; first column contains date/time, + # and second column contains corresponding value + col_idx = (0, 1) + col_types = 'datetime64[m], float' + + # fetch water levels and tide predictions + try: + date_time, water_level = fetch( + 'water_level', 'Date Time, Water Level, Sigma, O or I (for verified), F, R, L, Quality', + col_idx, col_types) + except: + print('*** Fetching water_level failed, returning None') + date_time = None + water_level = None + + try: + date_time2, prediction = fetch('predictions', 'Date Time, Prediction', + col_idx, col_types) + if date_time is None: + date_time = date_time2 + + except: + print('*** Fetching prediction failed, returning None') + date_time2 = None + prediction = None + + # ensure that date/time ranges are the same + if (date_time is not None) and (date_time2 is not None): + if not np.array_equal(date_time, date_time2): + raise ValueError('Received data for different times') + + return date_time, water_level, prediction + + +def datum_value(station, datum, time_zone='GMT', units='metric'): + """Fetch datum value for given NOAA tide station. + :Required Arguments: + - station (string): 7 character station ID + - datum (string): MSL, MTL + :Optional Arguments: + - time_zone (string): see NOAA API documentation for possible values + - units (string): see NOAA API documentation for possible values + :Returns: + - datum_value (float): value for requested datum reference + """ + def get_noaa_params(station, time_zone, units): + noaa_params = { + 'product': 'datums', + 'units': units, + 'time_zone': time_zone, + 'station': station, + 'application': 'Clawpack', + 'format':'json' + } + return noaa_params + + #Scrapes MTL/MSL Datum Value + def get_datum(station, time_zone, units): + noaa_params = get_noaa_params(station, time_zone, units) + url = '{}?{}'.format(NOAA_API_URL, urlencode(noaa_params)) + page_data = requests.get(url) + data = page_data.json()['datums'] + datum_value = [d['v'] for d in data if d['n'] == datum] + return datum_value + + try: + datum_value = float(get_datum(station, time_zone, units)[0]) + except: + print('*** Fetching datum value failed, returning None') + datum_value = None + + return datum_value + + +def predict_tide(station, begin_date, end_date, datum='MTL', time_zone='GMT', units='meters'): + """Fetch datum value for given NOAA tide station. 
+ :Required Arguments: + - station (string): 7 character station ID + - begin_date (datetime): start of date/time range of prediction + - end_date (datetime): end of date/time range of prediction + :Optional Arguments: + - datum (string): MTL for tide prediction + - time_zone (string): see NOAA API documentation for possible values + - units (string): see NOAA API documentation for possible values + :Returns: + - heights (float): tide heights + """ + #These are the NOAA constituents, in the order presented on NOAA's website. + from pytides.constituent import noaa, _Z0 + + constituents = [c for c in noaa if c != _Z0] + noaa_values = retrieve_constituents(station, time_zone, units) + noaa_amplitudes = [float(amplitude) for amplitude in noaa_values['Amplitude']] + noaa_phases = [float(phases) for phases in noaa_values['Phase']] + + #We can add a constant offset - set to MTL + # MTL = datum_value(args[0], 'MTL') + desired_datum = datum_value(station, datum) + MSL = datum_value(station, 'MSL') + offset = MSL - desired_datum + constituents.append(_Z0) + noaa_phases.append(0) + noaa_amplitudes.append(offset) + + #Build the model + assert(len(constituents) == len(noaa_phases) == len(noaa_amplitudes)) + model = np.zeros(len(constituents), dtype = Tide.dtype) + model['constituent'] = constituents + model['amplitude'] = noaa_amplitudes + model['phase'] = noaa_phases + tide = Tide(model = model, radians = False) + + #Time Calculations + delta = (end_date-begin_date)/timedelta(hours=1) + .1 + times = Tide._times(begin_date, np.arange(0, delta, .1)) + + #Height Calculations + heights_arrays = [tide.at([times[i]]) for i in range(len(times))] + heights = [val for sublist in heights_arrays for val in sublist] + + return heights + + +def datetimes(begin_date, end_date): + #Time Calculations + delta = (end_date-begin_date)/timedelta(hours=1) + .1 + times = Tide._times(begin_date, np.arange(0, delta, .1)) + return times + + +def detide(NOAA_observed_water_level, predicted_tide): + # NOAA observed water level - predicted tide + return [(NOAA_observed_water_level[i] - predicted_tide[i]) for i in range(len(NOAA_observed_water_level))] + +#Surge Implementation +def surge(station, beg_date, end_date, landfall_date): + """Fetch datum value for given NOAA tide station. 
+ :Required Arguments: + - station (string): 7 character station ID + - begin_date (datetime): start of date/time range of prediction + - end_date (datetime): end of date/time range of prediction + - landfall_date (datetime): approximate time of landfall for reference + :Optional Arguments: + - datum (string): MTL for tide prediction and retrieval + - time_zone (string): see NOAA API documentation for possible values + :Returns: + - times (float): times with landfall event as reference + - surge (float): surge heights + """ + predicted_tide = predict_tide(station, beg_date, end_date) + NOAA_times, NOAA_observed_water_level, NOAA_predicted_tide = fetch_noaa_tide_data(station, beg_date, end_date) + + #detides NOAA observed water levels with predicted tide + surge = detide(NOAA_observed_water_level, predicted_tide) + #modifies NOAA times to datetimes + times = [((pd.to_datetime(time).to_pydatetime())-landfall_date)/timedelta(days=1) for time in NOAA_times] + + return times, surge + + + +# ====================== +# From fetch_noaa_gauge_info.py + + +url_base = 'https://api.tidesandcurrents.noaa.gov/mdapi/prod/webapi/stations' + +url_base_web = 'https://tidesandcurrents.noaa.gov/stationhome.html' + +station = '9441102' # Westport + +# Datums: + +def fetch_datums(station, units='meters', verbose=False): + + """ + Fetch datums for CO-OPS station. + Convert units if required. + If verbose is True, print out the station info and datums. + + Returns: + datums = list of dictionaries, one for each datum, with keys + 'name', 'description', 'value' + To print the datum names: + print([d['name'] for d in datums]) + values have been converted to specified units. + datums_info = dictionary with these keys: + 'accepted', 'superseded', 'epoch', 'units', 'OrthometricDatum', + 'datums', 'LAT', 'LATdate', 'LATtime', 'HAT', 'HATdate', + 'HATtime', 'min', 'mindate', 'mintime', 'max', 'maxdate', + 'maxtime', 'disclaimers', 'DatumAnalysisPeriod', 'NGSLink', + 'ctrlStation', 'self' + datums_info['datums'] is the list from which + datums was created, after possibly changing units. + """ + + assert units in ['meters','feet'], '*** unrecognized units: %s' % units + + try: + station_id = str(int(station)) # make sure it's a string of an int + except: + raise ValueError('Station cannot be converted to int') + + url_datums = '%s/%s/datums.json' % (url_base, station_id) + + with urlopen(url_datums) as response: + text = response.read().decode('utf-8') + + # convert to a dictionary: + p = json.loads(text) # dictionary + datums = p['datums'] # list of dictionaries, with keys 'name', 'value' + + noaa_units = p['units'] # probably 'feet' + + if noaa_units == 'feet' and units == 'meters': + for d in datums: + d['value'] = d['value'] * 0.3048 + + if noaa_units == 'meters' and units == 'feet': + for d in datums: + d['value'] = d['value'] / 0.3048 + + if verbose: + print('Datums info for station %s' % station_id) + print('Should agree with info at \n %s?id=%s' \ + % (url_base_web,station_id)) + for k in p.keys(): + if k != 'datums': + print('%s: %s' % (k.rjust(20), p[k])) + print('Datums will be returned in units = %s:' % units) + for d in datums: + print('%s: %11.3f %s' % (d['name'].rjust(7),d['value'],units)) + + datums_info = p # also return full dictionary + return datums, datums_info + + +def fetch_harcon(station, units='meters', verbose=False): + + """ + Fetch harmonic constituents for CO-OPS station. + Convert units of the amplitudes if required. + If verbose is True, print out the info and constinuents. 
+ + Returns: + harcon = list of dictionaries, one for each constituent, with keys: + 'number', 'name', 'description', 'amplitude', + 'phase_GMT', 'phase_local', 'speed' + To print the constituent names: + print([h['name'] for h in harcon]) + amplitudes have been converted to specified units. + harcon_info = dictionary with keys: + 'units', 'HarmonicConstiuents', 'self' + harcon_info['HarmonicConstiuents'] is the list from which + harcon was created, after possibly changing units. + """ + + assert units in ['meters','feet'], '*** unrecognized units: %s' % units + + try: + station_id = str(int(station)) # make sure it's a string of an int + except: + raise ValueError('Station cannot be converted to int') + + url_harcon = '%s/%s/harcon.json' % (url_base, station_id) + + with urlopen(url_harcon) as response: + text = response.read().decode('utf-8') + + # convert to a dictionary: + p = json.loads(text) + hc = p['HarmonicConstituents'] # list of dictionaries + + noaa_units = p['units'] # probably 'feet' + + if noaa_units == 'feet' and units == 'meters': + for h in hc: + h['amplitude'] = h['amplitude'] * 0.3048 + + if noaa_units == 'meters' and units == 'feet': + for h in hc: + h['amplitude'] = h['amplitude'] / 0.3048 + + if verbose: + print('Harmonic constituent info for station %s' % station_id) + print('Should agree with info at \n %s?id=%s' \ + % (url_base_web,station_id)) + for k in p.keys(): + if k != 'HarmonicConstituents': + print('%s: %s' % (k.rjust(20), p[k])) + + print('Harmonic Constituents will be returned in units = %s:' % units) + + print(' Name amplitude (%s) phase (GMT) speed' % units) + for h in hc: + print('%s: %11.3f %20.3f %18.6f' % (h['name'].rjust(5),h['amplitude'], + h['phase_GMT'],h['speed'])) + + harcon = hc + harcon_info = p # also return full dictionary + + return harcon, harcon_info + +def make_pytides_model(station): + """ + Fetch harmonic constituents for station and return lists of 37 + amplitudes (meters) and phases (GMT) as required for a pytides model. + """ + + print('Fetching harmonic constituents...') + harcon, harcon_info = fetch_harcon(station, units='meters', verbose=False) + + numbers = list(range(1,38)) # assume standard 37 constituents + harcon_numbers = [h['number'] for h in harcon] + # make sure there are the expected number and in the right order: + assert harcon_numbers == numbers, '*** unexpected harcon_numbers = %s' \ + % harcon_numbers + + amplitudes = [h['amplitude'] for h in harcon] + phases = [h['phase_GMT'] for h in harcon] + + return amplitudes, phases \ No newline at end of file