diff --git a/.gitignore b/.gitignore
index 450b221944f0a911fb8bae441e04d4fdd4ecb7de..080382510bd120367b134f02a3f2cc9fecdd0883 100644
--- a/.gitignore
+++ b/.gitignore
@@ -9,3 +9,4 @@ tests/data/
 data/
 tests/results
 tests/temp_data_cache/
+notTracked/
diff --git a/README.md b/README.md
index aeae3c6c0d72069df37baf40c9b7c82747e8641a..d55e2ce2164fb0690523a95e59f8d85158dab8cd 100644
--- a/README.md
+++ b/README.md
@@ -40,26 +40,56 @@ The handling of required packages is done with poetry. So run poetry in the proj
 poetry install
 ```
 
+# How does this tool work?
+
+This tool has two main parts. The first handles requests to the TOAR database and the analysis of the data.
+The second part is the gridding, which is performed offline.
+
+## Request to TOAR Database with Statistical Analysis 
+
+Requests are sent to the analysis service of the TOAR database. This allows selecting stations based on their metadata and performing a statistical analysis on the selected data.
+Whenever a request is submitted, it will be processed. The returned status endpoint will point to the results as soon as the processing is finished.
+A request can take several hours, depending on the time range and the number of requested stations.
+At the moment, there is no way to check the status of a running job before it is finished (Date: 2024-05-14).
+
+Once a request is finished, its status endpoint does not remain valid forever. The data are, however, kept longer in a cache by the analysis service. When the same request is submitted again, the cache is checked first to see whether the results have already been calculated. Retrieving the results from the cache can still take some time, similar to the analysis itself.
+
+There is no check whether an identical request is already running. Submitting the same request multiple times therefore puts additional load on the system and slows down all requests.
+
+The TOAR database has only a limited number of workers for performing statistical analyses. It is therefore advised to run requests one after another, especially large requests covering many stations and/or a long time range.
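+
+As a minimal sketch (following the example notebooks in `tests/`; paths and the time range are placeholders), setting up the client-side service that submits requests and stores downloaded results locally could look like this:
+```
+from datetime import datetime as dt
+from pathlib import Path
+
+from toargridding.toar_rest_client import AnalysisServiceDownload
+from toargridding.metadata import TimeSample
+
+stats_endpoint = "https://toar-data.fz-juelich.de/api/v2/analysis/statistics/"
+cache_basepath = Path("cache")
+result_basepath = Path("results")
+cache_basepath.mkdir(exist_ok=True)
+result_basepath.mkdir(exist_ok=True)
+
+# use_downloaded=True reuses results that were already downloaded to result_basepath
+analysis_service = AnalysisServiceDownload(
+    stats_endpoint=stats_endpoint,
+    cache_dir=cache_basepath,
+    sample_dir=result_basepath,
+    use_downloaded=True,
+)
+
+# sampled time points of the request
+time = TimeSample(start=dt(2000, 1, 1), end=dt(2019, 12, 31), sampling="daily")
+```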
+
+## Gridding
+
+The gridding uses a user-defined grid to combine all stations within a cell.
+For each cell, the mean, the standard deviation and the number of contributing stations are reported.
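+
+A minimal sketch (again following the example notebooks in `tests/`) of defining a grid and gridding the results of a request, reusing the `analysis_service` and `time` objects from the sketch above:
+```
+from toargridding.grids import RegularGrid
+from toargridding.gridding import get_gridded_toar_data
+
+# regular latitude/longitude grid; resolutions are given in degrees
+my_grid = RegularGrid(lat_resolution=1.9, lon_resolution=2.5)
+
+# submits the requests to the TOAR database and grids the downloaded results
+datasets, metadatas = get_gridded_toar_data(
+    analysis_service=analysis_service,
+    grid=my_grid,
+    time=time,
+    variables=["mole_fraction_of_ozone_in_air"],
+    stats=["dma8epax"],
+)
+
+# one netCDF file per requested statistic
+for dataset, metadata in zip(datasets, metadatas):
+    dataset.to_netcdf(f"{metadata.get_id()}.nc")
+```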
+
 # Example
 
 There are at the moment three example provided as jupyter notebooks (https://jupyter.org/).
 
-Running them with the python environment produced by peotry can be done by
+Running them with the python environment produced by poetry can be done by
 ```
 poetry run jupyter notebook
 ```
 
 ##  High level function
 ```
-tests/produce_data.ipynb 
+tests/produce_data_withOptional.ipynb 
 ```
 Provides an example on how to download data, apply gridding and save the results as netCDF files.
-A possible improvement for is the exchange of the AnalysisService with AnalysisServiceDownload, which caches requests from the TOARDB. 
+The AnalysisServiceDownload caches already obtained data on the local machine.
 This allows different griddings without the necessity to repeat the request to the TOARDB and subsequent download.
 
-The example uses a dictionary to pass additional arguments to the request to the TAOR database. 
+In total, two requests are executed.
+The example uses a dictionary to pass additional arguments to the request to the TOAR database (here: the station category from TOAR).
 A detailed list can be found at https://toar-data.fz-juelich.de/api/v2/#stationmeta
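+
+A sketch of the dictionary used in this notebook; only "toar1_category" is set, but any other station metadata field from the link above could be used instead:
+```
+details4Query = {
+    "toar1_category" : "Urban",  # alternatives used in the examples: "RuralLowElevation", "RuralHighElevation"
+}
+```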
 
+```
+tests/produce_data_manyStations.ipynb
+```
+Uses a similar request, but without the restriction to the station type. A much larger number of stations is therefore requested (about 1000, compared to the few hundred with a "toar1_category" classification used in the previous example).
+For this reason, this example is restricted to the calculation of "dma8epax".
+
 ## Retrieving data
 ```
 tests/get_sample_data.ipynb 
@@ -73,7 +103,7 @@ tests/get_sample_data_manual.ipynb
 Downloads data from the TOAR database with a manual creation of the request to the TOAR database.
 This example does not perform any gridding.
 
-## Retriving data and visualization
+## Retrieving data and visualization
 ```
 tests/quality_controll.ipynb
 ```
diff --git a/tests/get_sample_data.ipynb b/tests/get_sample_data.ipynb
index b5f33adbd0dbf31f74e0c6e8a51b9968343381db..0e5a9b30dfe09d3573fe275c44a096d8d88c2c51 100644
--- a/tests/get_sample_data.ipynb
+++ b/tests/get_sample_data.ipynb
@@ -19,7 +19,7 @@
     "\n",
     "time = TimeSample(start, end, sampling=sampling)\n",
     "# { \"station_type_of_area\" : \"urban\" } category is not known\n",
-    "metadata = Metadata.construct(\"mole_fraction_of_ozone_in_air\", time, statistic, { \"toar1_category\" : \"RuralHighElevation\"})#\n",
+    "metadata = Metadata.construct(\"mole_fraction_of_ozone_in_air\", time, statistic, { \"toar1_category\" : \"RuralLowElevation\"})#\n",
     "\n",
     "start_time = datetime.now()\n",
     "print(start_time)"
diff --git a/tests/produce_data_manyStations.ipynb b/tests/produce_data_manyStations.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..625a0cd5850af58df00b1b0d795297f16e9e61b9
--- /dev/null
+++ b/tests/produce_data_manyStations.ipynb
@@ -0,0 +1,99 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from datetime import datetime as dt\n",
+    "from collections import namedtuple\n",
+    "from pathlib import Path\n",
+    "\n",
+    "from toargridding.toar_rest_client import AnalysisServiceDownload\n",
+    "from toargridding.grids import RegularGrid\n",
+    "from toargridding.gridding import get_gridded_toar_data\n",
+    "from toargridding.metadata import TimeSample"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#creation of request.\n",
+    "\n",
+    "Config = namedtuple(\"Config\", [\"grid\", \"time\", \"variables\", \"stats\",\"moreOptions\"])\n",
+    "\n",
+    "valid_data = Config(\n",
+    "    RegularGrid( lat_resolution=1.9, lon_resolution=2.5, ),\n",
+    "    TimeSample( start=dt(2000,1,1), end=dt(2019,12,31), sampling=\"daily\"),#possibly adopt range:-)\n",
+    "    [\"mole_fraction_of_ozone_in_air\"],#variable name\n",
+    "    [ \"dma8epax\" ]\n",
+    ")\n",
+    "\n",
+    "configs = {\n",
+    "    \"test_ta\"  : valid_data\n",
+    "}\n",
+    "\n",
+    "#testing access:\n",
+    "#config = configs[\"test_ta\"]\n",
+    "#config.grid"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#CAVE: the request takes over 30min per requested year. Therefore this cell needs to be executed at different times to check, if the results are ready for download.\n",
+    "#the processing is done on the server of the TOAR database.\n",
+    "#a restart of the cell continues the request to the REST API if the requested data are ready for download\n",
+    "# The download can also take a few minutes\n",
+    "\n",
+    "stats_endpoint = \"https://toar-data.fz-juelich.de/api/v2/analysis/statistics/\"\n",
+    "cache_basepath = Path(\"cache\")\n",
+    "result_basepath = Path(\"results\")\n",
+    "cache_basepath.mkdir(exist_ok=True)\n",
+    "result_basepath.mkdir(exist_ok=True)\n",
+    "analysis_service = AnalysisServiceDownload(stats_endpoint=stats_endpoint, cache_dir=cache_basepath, sample_dir=result_basepath, use_downloaded=True)\n",
+    "\n",
+    "for person, config in configs.items():\n",
+    "    datasets, metadatas = get_gridded_toar_data(\n",
+    "        analysis_service=analysis_service,\n",
+    "        grid=config.grid,\n",
+    "        time=config.time,\n",
+    "        variables=config.variables,\n",
+    "        stats=config.stats,\n",
+    "    )\n",
+    "\n",
+    "    for dataset, metadata in zip(datasets, metadatas):\n",
+    "        dataset.to_netcdf(result_basepath / f\"{metadata.get_id()}.nc\")\n",
+    "        print(metadata.get_id())"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "toargridding-8RVrxzmn-py3.11",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.11.7"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/tests/produce_data.ipynb b/tests/produce_data_withOptional.ipynb
similarity index 85%
rename from tests/produce_data.ipynb
rename to tests/produce_data_withOptional.ipynb
index 74070660c76cd79385fe6f9bbdc99ca957470d13..5450e57540f8b82c5505afa2ac507cd5d5207773 100644
--- a/tests/produce_data.ipynb
+++ b/tests/produce_data_withOptional.ipynb
@@ -31,14 +31,16 @@
     "#see page 18 in https://toar-data.fz-juelich.de/sphinx/TOAR_UG_Vol03_Database/build/latex/toardatabase--userguide.pdf\n",
     "\n",
     "details4Query ={\n",
-    "    #\"toar1_category\" : \"Urban\" #uncomment if wished:-)\n",
+    "    \"toar1_category\" : \"Urban\" #uncomment if wished:-)\n",
+    "    #\"toar1_category\" : \"RuralLowElevation\" #uncomment if wished:-)\n",
+    "    #\"toar1_category\" : \"RuralHighElevation\" #uncomment if wished:-)\n",
     "}\n",
     "\n",
     "valid_data = Config(\n",
     "    RegularGrid( lat_resolution=1.9, lon_resolution=2.5, ),\n",
-    "    TimeSample( start=dt(2000,1,1), end=dt(2019,12,31), sampling=\"daily\"),\n",
+    "    TimeSample( start=dt(2000,1,1), end=dt(2019,12,31), sampling=\"daily\"),#possibly adopt range:-)\n",
     "    [\"mole_fraction_of_ozone_in_air\"],#variable name\n",
-    "    [\"mean\", \"dma8epax\"],# will start one request after another other...\n",
+    "    [ \"mean\", \"dma8epax\"],# will start one request after another other...\n",
     "    details4Query\n",
     ")\n",
     "\n",
@@ -47,8 +49,8 @@
     "}\n",
     "\n",
     "#testing access:\n",
-    "#config = configs[\"test_ta\"]\n",
-    "#config.grid"
+    "config = configs[\"test_ta\"]\n",
+    "config.grid"
    ]
   },
   {
@@ -67,7 +69,7 @@
     "result_basepath = Path(\"results\")\n",
     "cache_basepath.mkdir(exist_ok=True)\n",
     "result_basepath.mkdir(exist_ok=True)\n",
-    "analysis_service = AnalysisServiceDownload(stats_endpoint=stats_endpoint, cache_dir=cache_basepath, sample_dir=result_basepath)\n",
+    "analysis_service = AnalysisServiceDownload(stats_endpoint=stats_endpoint, cache_dir=cache_basepath, sample_dir=result_basepath, use_downloaded=True)\n",
     "\n",
     "for person, config in configs.items():\n",
     "    datasets, metadatas = get_gridded_toar_data(\n",
@@ -75,8 +77,8 @@
     "        grid=config.grid,\n",
     "        time=config.time,\n",
     "        variables=config.variables,\n",
-    "        stats=config.stats\n",
-    "        #**config.moreOptions\n",
+    "        stats=config.stats,\n",
+    "        **config.moreOptions\n",
     "    )\n",
     "\n",
     "    for dataset, metadata in zip(datasets, metadatas):\n",
@@ -101,7 +103,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.11.5"
+   "version": "3.11.7"
   }
  },
  "nbformat": 4,
diff --git a/toargridding/metadata.py b/toargridding/metadata.py
index 6ace341523dda3f8738f35d491656e3ab5a6c39e..b96deda485d415636325aa60f5a9fc5486e0e5de 100644
--- a/toargridding/metadata.py
+++ b/toargridding/metadata.py
@@ -61,8 +61,8 @@ class TimeSample:
     def as_datetime_index(self) -> pd.DatetimeIndex:
         """Conversion to array with all sampled time points
         """
-        print(self.start)
-        print(self.end)
+        #print(self.start)
+        #print(self.end)
         return pd.period_range(self.start, self.end, freq=self.frequency).to_timestamp()
 
     @property
@@ -149,7 +149,8 @@ class Metadata:
 
         For example, used for saving link to results of a request in the cache.
         """
-        return f"{self.variable.name}_{self.statistic}_{self.time.daterange_option}_at_{datetime.now().date().isoformat()}"
+        addition = "_".join(str(i) for i in sorted(self.moreOptions.values()))
+        return "_".join(str(i) for i in [self.variable.name, self.statistic, self.time.daterange_option, addition, "at", datetime.now().date().isoformat()])
 
     def get_title(self) -> str:
         """creation of a title for metadata of a xarray according to the CF convention
@@ -185,6 +186,8 @@ class AnalysisRequestResult:
 
 def get_global_attributes(metadata: Metadata) -> Dict:
     """combination of global metadata with request specific values.
+    Also adds all additional options passed to the request as metadata.
+    Throws an exception if moreOptions contains a key already in use by the metadata.
     """
     dynamic_cf_attributes = {
         "id": metadata.get_id(),
@@ -202,6 +205,11 @@ def get_global_attributes(metadata: Metadata) -> Dict:
         # "time_coverage_duration": 0, # TODO insert durations
         # "time_coverage_resolution": 0,
     }
+    for key, value in metadata.moreOptions.items():
+        if key not in dynamic_cf_attributes:
+            dynamic_cf_attributes[key] = value
+        else:
+            raise ValueError(f"{key} already has the value {dynamic_cf_attributes[key]}. Overriding it with \"{value}\" is prohibited!")
     cf_attributes = dynamic_cf_attributes | global_cf_attributes
     return cf_attributes
 
diff --git a/toargridding/toar_rest_client.py b/toargridding/toar_rest_client.py
index 7c2406787e9190824af61d8021c611098e9d89f2..9c7ff534cd136344aef64dd9c319e6e7b3484e68 100644
--- a/toargridding/toar_rest_client.py
+++ b/toargridding/toar_rest_client.py
@@ -99,7 +99,7 @@ class Cache:
     
     """
 
-    def __init__(self, cache_dir : Path):
+    def __init__(self, cache_dir : Path, fn : str = "status_endpoints"):
         """constructor
         
         Throws exception if cache directory does not exists.
@@ -111,7 +111,7 @@ class Cache:
         
         if not cache_dir.exists():
             raise RuntimeError(f"Given directory for saving cache file does not exists. Path: {cache_dir}")
-        self.cache_file = cache_dir / "status_endpoints.json"
+        self.cache_file = cache_dir / f"{fn}.json"
 
         if not self.cache_file.is_file():  # initialize cache with dummy values
             with open(self.cache_file, "w") as cache:
@@ -171,6 +171,7 @@ class Connection:
 
         self.endpoint = endpoint
         self.cache = Cache(cache_dir)
+        self.cache_backup = Cache(cache_dir, "status_endpoints.old")
         # max wait time is 30min
         self.wait_seconds = [minutes * 60 for minutes in (5, 5, 5, 5, 5, 5)]
 
@@ -180,13 +181,22 @@ class Connection:
         This is the main function to obtained data from the TOAR DB. It will start requests or lookup if an already started requests is finished.
 
         Throws an exception, if the results are not available after the waiting time. A restart of the function continues the regular lookup for results.
+        This function catches HTTP errors caused by connection issues and continues polling the status endpoint until the waiting time is exceeded.
         """
         status_endpoint = self.get_status_endpoint(query_options)
 
         for i, wait_time in enumerate(self.wait_seconds):
             print(f"try: {i+1}, wait_time: {wait_time}")
             response = self.wait_and_get(status_endpoint, wait_secs=wait_time)
+            #do error handling i.e. look for connection issues
+            try:
+                response.raise_for_status()
+            except requests.exceptions.HTTPError as e: 
+                print(f"\tconnection error ({e.response.status_code}: {e.response.reason}). Trying again later")
+                continue
+            #are our results ready to obtain?
             if response.headers["Content-Type"] == "application/zip":
+                print("Results are available for download")
                 return response
         else:
             raise RuntimeError(
@@ -201,6 +211,8 @@ class Connection:
         If the cache knows the endpoint, but the DB has deleted it, the endpoint is removed from the cache and a new request is started.
         Otherwise a new new request is started.
         
+        Throws a RuntimeError in case of a read timeout or any other unexpected error. In case of an HTTPError, the cached status endpoint is moved to a backup cache and a new request is started.
+        
         Parameters:
         ----------
         Options for the request.
@@ -210,8 +222,22 @@ class Connection:
 
             try:  # test for stale cache
                 self.wait_and_get(status_endpoint).raise_for_status()
-            except requests.exceptions.HTTPError:
-                self.cache.remove(query_options.cache_key)
+            except requests.exceptions.ReadTimeout as e:
+                print("Caught read timeout.")
+                raise RuntimeError("Connection to TAORDB timed out (ReadTimeout) while checking cached status point. Please try again later.")
+            except requests.exceptions.HTTPError as e:
+                #TODO add detailed processing: What was the reason for the error? Do we really need to create a new request or is there another problem that might resolve itself by simply waiting?
+                print("A connection error occurred:")
+                print(f"Status Code: {e.response.status_code}")
+                print(f"Reason: {e.response.reason}")
+                print(f"Text: {e.response.text}")
+                print(f"Status Endpoint: {status_endpoint}")
+                #use inverse order for saving. the status endpoint should be more unique
+                self.cache_backup.put(status_endpoint, query_options.cache_key)
+                #will be overwritten in the next step...
+                #self.cache.remove(query_options.cache_key)
+            except Exception as e:
+                raise RuntimeError("An error occurred while accessing a cached request") from e
             else:
                 print("load status endpoint from cache")
                 return status_endpoint
@@ -225,15 +251,30 @@ class Connection:
         """create and new request to the TOAR DB.
 
         Adds the status endpoint of the request to the cache. 
+
+        Throws an exception if the TOAR DB returns an error.
         
         Parameters:
         ----------
         query_options:
             request to the TOAR database.
         """
-        response = self.wait_and_get(self.endpoint, asdict(query_options, dict_factory=quarryToDict))
         try:
-            status_endpoint = response.json()["status"]
+            response = self.wait_and_get(self.endpoint, asdict(query_options, dict_factory=quarryToDict))
+        except requests.exceptions.HTTPError as e:
+            print(f"A connection error occurred:")
+            print(f"Status Code: {e.response.status_code}")
+            print(f"Reason: {e.response.reason}")
+            print(f"Text: {e.response.text}")
+            raise e
+        except requests.exceptions.ReadTimeout as e:
+            print("Caught read timeout.")
+            raise RuntimeError("Read timeout while querying for status endpoint")
+        try:
+            if response.headers["Content-Type"] == "application/json":
+                status_endpoint = response.json()["status"]
+            else:
+                raise Exception( f"Unexpected type of response: {response.headers['Content-Type']}" )
         except:
             raise RuntimeError(f"Request was not successful. Response by TOAR database: {response.text}")
         self.cache.put(query_options.cache_key, status_endpoint)
@@ -338,12 +379,15 @@ class AnalysisService:
         ##here we observe some differences in the number of timestamps.
         # remove data where utc -> sun/local ? time conversion leads to dateshift
         newDates = metadata.time.as_datetime_index()
-        if len(timeseries.columns) == len(newDates)+2:
-            print(f"Info: removed columns {timeseries.columns[0]} and {timeseries.columns[-1]} to match data range of {newDates[0]} to {newDates[-1]}")
-            timeseries.drop(columns=[first, last], inplace=True)
-        elif len(timeseries.columns) == len(newDates)+1:
+        lenDiff = len(timeseries.columns) - len(newDates)
+        if lenDiff == 0:
+            print(f"Info: Obtained data range covers {newDates[0]} to {newDates[-1]}")
+        elif lenDiff == 1:
             print(f"Info: removed columns {timeseries.columns[-1]} to match data range of {newDates[0]} to {newDates[-1]}")
             timeseries.drop(columns=[last], inplace=True)
+        elif lenDiff == 2:
+            print(f"Info: removed columns {timeseries.columns[0]} and {timeseries.columns[-1]} to match data range of {newDates[0]} to {newDates[-1]}")
+            timeseries.drop(columns=[first, last], inplace=True)
         else:
             raise RuntimeError(f"There is a mismatch in the timestamps...\nDownloaded:{timeseries.columns}\nFrom Metadata: {newDates}")
         timeseries.columns = newDates 
@@ -427,9 +471,12 @@ class AnalysisServiceDownload(AnalysisService):
         needs_fresh_download = (not self.use_downloaded) or (not filename.is_file())
 
         if needs_fresh_download:
+            print("Performing request to TOAR DB")
             response = self.connection.get(query_options)
             with open(filename, "w+b") as downloaded_file:
                 downloaded_file.write(response.content)
+        else:
+            print(f"Loading already downloaded data from {filename}")
 
         with open(filename, "r+b") as data_file:
             content = data_file.read()
@@ -447,5 +494,5 @@ class AnalysisServiceDownload(AnalysisService):
         metadata:
             metadata for the request.
         """
-        addition = "_".join(metadata.moreOptions.values())
-        return f"{metadata.statistic}_{metadata.time.sampling}_{metadata.variable.cf_standardname}_{metadata.time.start.date()}_{metadata.time.end.date()}_{addition}.zip"
+        addition = "_".join(str(i) for i in sorted(metadata.moreOptions.values()))
+        return "_".join(str(i) for i in [metadata.statistic, metadata.time.sampling, metadata.variable.cf_standardname, metadata.time.start.date(), metadata.time.end.date(), addition]) + ".zip"