Skip to content
Snippets Groups Projects
Commit ff3f73a7 authored by Carsten Hinz's avatar Carsten Hinz
Browse files

changed ID and filename creation to include variable names

added type_of_area for separation of stations
parent 0c1bc74b
No related branches found
No related tags found
1 merge request!11Creation of first beta release version
%% Cell type:code id: tags:
``` python
from datetime import datetime, timedelta
from toargridding.metadata import TimeSample, Metadata

# temporal sampling of the requested statistics
sampling = "daily" # FIXME check monthly !!!
start = datetime(2010, 1, 1)
end = datetime(2010, 2, 1)

# REST endpoint of the TOAR analysis service
statistics_endpoint = "https://toar-data.fz-juelich.de/api/v2/analysis/statistics/"
statistic = "mean"

time = TimeSample(start, end, sampling=sampling)
# Restrict the request to urban stations; other values to try: "Rural", "Suburban".
# Note: "station_type_of_area" is not a known field; "toar1_category" is an
# alternative filter, e.g. { "toar1_category" : "RuralLowElevation" }.
# The previous version built the identical Metadata twice; the duplicate call was removed.
metadata = Metadata.construct("mole_fraction_of_ozone_in_air", time, statistic, { "type_of_area" : "Urban" })

# remember the wall-clock start so the next cell can report the total runtime
start_time = datetime.now()
print(start_time)
```
%% Cell type:code id: tags:
``` python
from pathlib import Path
from toargridding.toar_rest_client import AnalysisServiceDownload

# prepare the output directories (cache for intermediate results, data for downloads)
base_path = Path(".")
cache_dir = base_path / "results"
download_dir = base_path / "data"
for directory in (cache_dir, download_dir):
    directory.mkdir(parents=True, exist_ok=True)

# submit the request to the TOAR analysis service and fetch the statistics
analysis_service = AnalysisServiceDownload(statistics_endpoint, cache_dir, download_dir)
results = analysis_service.get_data(metadata)

# report how long request + download took (start_time is set in the previous cell)
end_time = datetime.now()
print(end_time - start_time)
```
%% Cell type:code id: tags:
``` python
```
......
%% Cell type:code id: tags:
``` python
from datetime import datetime as dt
from collections import namedtuple
from pathlib import Path
from toargridding.toar_rest_client import AnalysisServiceDownload
from toargridding.grids import RegularGrid
from toargridding.gridding import get_gridded_toar_data
from toargridding.metadata import TimeSample
```
%% Cell type:code id: tags:
``` python
#creation of request.
Config = namedtuple("Config", ["grid", "time", "variables", "stats", "moreOptions"])

# moreOptions is implemented as a dict to add additional arguments to the query to the REST API.
# For example the field toar1_category with its possible values Urban, RuralLowElevation,
# RuralHighElevation and Unclassified can be added.
# see page 18 in https://toar-data.fz-juelich.de/sphinx/TOAR_UG_Vol03_Database/build/latex/toardatabase--userguide.pdf
details4Query = {
    # BUGFIX: the previous version was missing the comma after "Urban"; the adjacent
    # string literals were concatenated and the dict literal failed to parse.
    "toar1_category" : "Urban",  # uncomment if wished:-)
    #"toar1_category" : "RuralLowElevation",  # uncomment if wished:-)
    #"toar1_category" : "RuralHighElevation",  # uncomment if wished:-)
    "type_of_area" : "Rural",  # also test "Urban", "Suburban"
}

valid_data = Config(
    RegularGrid( lat_resolution=1.9, lon_resolution=2.5, ),
    TimeSample( start=dt(2000,1,1), end=dt(2019,12,31), sampling="daily"),  # possibly adapt range:-)
    ["mole_fraction_of_ozone_in_air"],  # variable name
    # BUGFIX: exactly one stats list may be passed here. The previous version passed two
    # active lists, giving Config six positional arguments for five fields (TypeError).
    #[ "mean", "dma8epax"],  # alternative statistics; requests are started one after another
    [ "dma8epa_strict" ],
    details4Query,
)

configs = {
    "test_ta" : valid_data,
}

#testing access:
config = configs["test_ta"]
config.grid
```
%% Cell type:code id: tags:
``` python
#CAVE: this cell runs about 30 minutes per requested year!
#The processing is done on the server of the TOAR database.
#A restart of the cell continues the request to the REST API if the requested data are ready for download.
#The download can also take a few minutes.
stats_endpoint = "https://toar-data.fz-juelich.de/api/v2/analysis/statistics/"

# working directories: cache for request bookkeeping, results for the final files
cache_basepath = Path("cache")
result_basepath = Path("results")
cache_basepath.mkdir(exist_ok=True)
result_basepath.mkdir(exist_ok=True)

analysis_service = AnalysisServiceDownload(
    stats_endpoint=stats_endpoint,
    cache_dir=cache_basepath,
    sample_dir=result_basepath,
    use_downloaded=True,
)

# run every configured request and write each gridded dataset to a netCDF file
for config_name, config in configs.items():
    datasets, metadatas = get_gridded_toar_data(
        analysis_service=analysis_service,
        grid=config.grid,
        time=config.time,
        variables=config.variables,
        stats=config.stats,
        **config.moreOptions,
    )
    for dataset, metadata in zip(datasets, metadatas):
        dataset.to_netcdf(result_basepath / f"{metadata.get_id()}.nc")
        print(metadata.get_id())
```
......
......@@ -74,7 +74,7 @@ class QueryOptions:
def cache_key(self):
"""creation to identify the request in the cache of known request.
"""
return "".join(asdict(self, dict_factory=quarryToDict).values())
return "".join(f"{key}{val}" for key, val in asdict(self, dict_factory=quarryToDict).items())
def quarryToDict(data : QueryOptions):
......@@ -494,5 +494,5 @@ class AnalysisServiceDownload(AnalysisService):
metadata:
metadata for the request.
"""
addition = "_".join(str(i) for i in sorted(metadata.moreOptions.values()))
addition = "_".join(f"{key}{val}" for key,val in sorted(metadata.moreOptions).items())
return "_".join(str(i) for i in [metadata.statistic, metadata.time.sampling, metadata.variable.cf_standardname, metadata.time.start.date(), metadata.time.end.date(), addition]) + ".zip"
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment