diff --git a/toardb/contacts/crud.py b/toardb/contacts/crud.py
index 1e3e2cd9d682c0a1ab413086773381d4fdfacf42..16fef48b0ae22ce4b99a00787efbff5cbc48ae56 100644
--- a/toardb/contacts/crud.py
+++ b/toardb/contacts/crud.py
@@ -21,7 +21,7 @@ def get_all_organisations(db: Session, skip : int = 0, limit: int = None):
 
 
 def get_organisation_by_name(db: Session, name: str):
-    return db.query(models.Organisation).filter(models.Organisation.name == name).first()
+    return db.query(models.Organisation).filter(models.Organisation.name == name.upper()).first()
  
 
 def create_organisation(db: Session, organisation: OrganisationCreate):
diff --git a/toardb/contacts/models.py b/toardb/contacts/models.py
index a3babb0972f40a94f2667c9c8ecd23e2db4421b5..cce5e27bdede16381263359820c64f6bb5f66dd3 100644
--- a/toardb/contacts/models.py
+++ b/toardb/contacts/models.py
@@ -29,11 +29,12 @@ Enumdict=namedtuple("Dict",["value","string","display_str"])
 OK_enum = (
     Enumdict(1, 'Government', 'government'),
     Enumdict(2, 'Research', 'research'),
-    Enumdict(3, 'International', 'international'),
-    Enumdict(4, 'NonProfit', 'non-profit'),
-    Enumdict(5, 'Commercial', 'commercial'),
-    Enumdict(6, 'Individual', 'individual'),
-    Enumdict(7, 'Other', 'other')
+    Enumdict(3, 'University', 'university'),
+    Enumdict(4, 'International', 'international'),
+    Enumdict(5, 'NonProfit', 'non-profit'),
+    Enumdict(6, 'Commercial', 'commercial'),
+    Enumdict(7, 'Individual', 'individual'),
+    Enumdict(8, 'Other', 'other')
     )
 
 CONTACTS_ID_SEQ = Sequence('contacts_id_seq')  # define sequence explicitly
diff --git a/toardb/data/crud.py b/toardb/data/crud.py
index 7f7d1b07f3a7a60d8dde83a09c49f06984273ce4..c815174e01fe1a8701e45a7ff2a7545fe906ba9f 100644
--- a/toardb/data/crud.py
+++ b/toardb/data/crud.py
@@ -27,6 +27,7 @@ from io import StringIO
 import csv
 
 def get_data(db: Session, timeseries_id: int, format: str):
+    print("now in crud!")
     data = db.query(models.Data).filter(models.Data.timeseries_id == timeseries_id).all()
     if format == 'json':
         return data
@@ -52,8 +53,8 @@ def get_data_by_datetime_and_timeseriesid(db: Session, datetime: dt.datetime, ti
     return db.query(models.Data).filter([models.Data.datetime== datetime, models.Data.timeseries_id == timeseries_id]).first()
 
 
-def get_all_data(db: Session, skip : int = 0, limit: int = None):
-    return db.query(models.Data).offset(skip).limit(limit).all()
+def get_all_data(db: Session, skip : int = 0, limit: int = 100):
+    return db.query(models.Data).offset(skip).limit(limit).all()
 
 
 def create_data_record(db: Session, engine: Engine,
diff --git a/toardb/data/data.py b/toardb/data/data.py
index cffb54485c49d419461fa9f7e9ce4c1cccf8e6c1..275587b257c20260b8d88d15a7edd4309963a892 100644
--- a/toardb/data/data.py
+++ b/toardb/data/data.py
@@ -18,7 +18,7 @@ router = APIRouter()
 
 #get all data of table data
 @router.get('/data/', response_model=List[schemas.Data])
-def get_all_data(skip: int = 0, limit: int = None, db: Session = Depends(get_db)):
+def get_all_data(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):
     data = crud.get_all_data(db, skip=skip, limit=limit)
     return data
 
@@ -30,6 +30,15 @@ def get_data(timeseries_id: int, format: str = 'json', db: Session = Depends(get
         raise HTTPException(status_code=404, detail="Data not found.")
     return db_data
 
+#get all data of one timeseries (given its ID)
+@router.get('/data/id/{timeseries_id}', response_model=List[schemas.Data])
+def get_data_by_id(timeseries_id: int, format: str = 'json', db: Session = Depends(get_db)):
+    print("in data.py!")
+    db_data = crud.get_data(db, timeseries_id=timeseries_id, format=format)
+    if db_data is None:
+        raise HTTPException(status_code=404, detail="Data not found.")
+    return db_data
+
 #some more gets to be tested:
 # - get all data of one special timestamp
 # - get all data of one special timestamp and timeseries
diff --git a/toardb/stationmeta/crud.py b/toardb/stationmeta/crud.py
index f1a153fd06b21f5d931af988b8e2e236c70dbf4f..e4147238e14d71635cedbb8e733045c40cf81e3e 100644
--- a/toardb/stationmeta/crud.py
+++ b/toardb/stationmeta/crud.py
@@ -43,6 +43,16 @@ def get_stationmeta(db: Session, station_code: str):
     return db_object
 
 
+def get_stationmeta_by_id(db: Session, station_id: int):
+    db_object = db.query(models.StationmetaCore).filter(models.StationmetaCore.id == station_id).first()
+    # there is a mismatch with coordinates and additional_metadata
+    if db_object:
+        if isinstance(db_object.coordinates, (WKBElement, WKTElement)):
+            db_object.coordinates = get_coordinates_from_geom(db_object.coordinates)
+        db_object.additional_metadata = str(db_object.additional_metadata).replace("'",'"')
+    return db_object
+
+
 def get_all_stationmeta_core(db: Session, skip : int = 0, limit: int = None):
     db_objects = db.query(models.StationmetaCore).offset(skip).limit(limit).all()
     for db_object in db_objects:
diff --git a/toardb/stationmeta/stationmeta.py b/toardb/stationmeta/stationmeta.py
index ec7f50df79341c096967c2eb52b67d06a947564b..cdf5ccce21722747152f906758aa307ad66cddf6 100644
--- a/toardb/stationmeta/stationmeta.py
+++ b/toardb/stationmeta/stationmeta.py
@@ -31,7 +31,7 @@ def get_all_stationmeta(skip: int = 0, limit: int = None, db: Session = Depends(
 def get_stationmeta_core(station_code: str, db: Session = Depends(get_db)):
     db_stationmeta_core = crud.get_stationmeta_core(db, station_code=station_code)
     if db_stationmeta_core is None:
-        raise HTTPException(status_code=404, detail="Data not found.")
+        raise HTTPException(status_code=404, detail=f"Metadata for station '{station_code}' not found.")
     return db_stationmeta_core
 
 # the same as above, but nested view
@@ -40,7 +40,15 @@ def get_stationmeta_core(station_code: str, db: Session = Depends(get_db)):
 def get_stationmeta(station_code: str, db: Session = Depends(get_db)):
     db_stationmeta = crud.get_stationmeta(db, station_code=station_code)
     if db_stationmeta is None:
-        raise HTTPException(status_code=404, detail="Data not found.")
+        raise HTTPException(status_code=404, detail=f"Metadata for station '{station_code}' not found.")
+    return db_stationmeta
+
+#get all core metadata of one station (given its ID)
+@router.get('/stationmeta/id/{station_id}', response_model=schemas.Stationmeta)
+def get_stationmeta_by_id(station_id: int, db: Session = Depends(get_db)):
+    db_stationmeta = crud.get_stationmeta_by_id(db, station_id=station_id)
+    if db_stationmeta is None:
+        raise HTTPException(status_code=404, detail=f"Metadata for station with ID '{station_id}' not found.")
     return db_stationmeta
 
 @router.get('/stationmeta_changelog/{station_id}', response_model=List[schemas.StationmetaChangelog])
diff --git a/toardb/timeseries/models_core.py b/toardb/timeseries/models_core.py
index 691f2c42848d5aa1d257b5b66231bf8f64557757..ea095e3c079ae18b0b0e85ed90a2c9a6600603c9 100644
--- a/toardb/timeseries/models_core.py
+++ b/toardb/timeseries/models_core.py
@@ -101,7 +101,6 @@ class Timeseries(Base):
     data_end_date = Column(DateTime(True), nullable=False)
     measurement_method = Column(ForeignKey('mm_vocabulary.enum_val'), nullable=False, server_default=text("1"))
     sampling_height = Column(Float(53), nullable=False)
-    additional_metadata = Column(JSONB(astext_type=Text()), nullable=True)
     date_added = Column(DateTime(True), nullable=False, server_default=text("now()"))
     date_modified = Column(DateTime(True), nullable=False, server_default=text("now()"))
 # do not use string declaration here (not working for pytest)
@@ -118,6 +117,8 @@ class Timeseries(Base):
     programme = relationship('TimeseriesProgramme')
     changelog = relationship('TimeseriesChangelog')
 
+    additional_metadata = Column(JSONB(astext_type=Text()), nullable=True)
+
 #   da_vocabulary = relationship('DaVocabulary')
 #   at_vocabulary = relationship('AtVocabulary')
 #   mm_vocabulary = relationship('MmVocabulary')
diff --git a/toardb/timeseries/schemas.py b/toardb/timeseries/schemas.py
index 8cae379d4b01e8815a03b52b6a80a1e0b985834c..649495adef6acd34baa491999cfe99a14c856d7d 100644
--- a/toardb/timeseries/schemas.py
+++ b/toardb/timeseries/schemas.py
@@ -21,7 +21,7 @@ from toardb.stationmeta.schemas import StationmetaCoreBase
 
 # ======== Timeseries =========
 
-class TimeseriesCoreBaseStub(BaseModel):
+class TimeseriesCoreBase(BaseModel):
     id: int = None
     label: str = Field(..., description="a short string to distinguish this timeseries from others with the same combination of station and variable")
     order: int = Field(..., description="indicates position of this timeseries in a list when several timeseries share the same station and variable combination")
@@ -62,12 +62,6 @@ class TimeseriesCoreBaseStub(BaseModel):
         return tuple(filter(lambda x: x.value == int(v), MM_enum))[0].string
 
 
-class TimeseriesCoreBase(TimeseriesCoreBaseStub):
-    station_id: int
-    variable_id: int
-    programme_id: int = None
-    
-
 class TimeseriesCoreCreate(TimeseriesCoreBase):
     pass
 
@@ -236,10 +230,26 @@ class TimeseriesChangelog(TimeseriesChangelogBase):
 
 # ======== for nested view/upload =========
 
-class TimeseriesBase(TimeseriesCoreBaseStub):
+class TimeseriesBase(TimeseriesCoreBase):
+    label: str
+    order: int
+    access_rights: str
+    sampling_frequency: str
+    aggregation: str
+    source: str
+    data_start_date: dt.datetime
+    data_end_date: dt.datetime
+    measurement_method: str
+    sampling_height: float
+    date_added: dt.datetime
+    date_modified: dt.datetime
+    station: StationmetaCoreBase
+    variable: Variable
+    programme: TimeseriesProgramme
+    additional_metadata: Json
     roles: List[TimeseriesRole] = None
     annotations: List[TimeseriesAnnotation] = None
-    variable: Variable
+    changelog: List[TimeseriesChangelog] = None
 #   station: StationmetaCoreCreate
 #   station: StationmetaCore
 #   station: StationmetaCreate
@@ -247,9 +257,6 @@ class TimeseriesBase(TimeseriesCoreBaseStub):
 #   station: Stationmeta
 # Try, which one of the above is wanted for station
 # The next one works:
-    station: StationmetaCoreBase
-    programme: TimeseriesProgramme
-    changelog: List[TimeseriesChangelog] = None
 
     class Config:
         orm_mode = True
@@ -268,7 +275,6 @@ class TimeseriesPatch(BaseModel):
     sampling_height: float = None
     date_added: dt.datetime = None
     date_modified: dt.datetime = None
-    additional_metadata: Json = None
 #   roles: List[TimeseriesRole] = None
 #   annotations: List[TimeseriesAnnotation] = None
 #   variable: Variable = None
@@ -277,6 +283,7 @@ class TimeseriesPatch(BaseModel):
     station_id: int = None
     variable_id: int = None
     programme_id: int = None
+    additional_metadata: Json = None
 
     class Config:
         orm_mode = True
diff --git a/toardb/timeseries/timeseries.py b/toardb/timeseries/timeseries.py
index d8d569a920f7e43c58a083e46a020d0631570ffd..83a7b9180c7b1bef34680c1065b7da7c15a01f4e 100644
--- a/toardb/timeseries/timeseries.py
+++ b/toardb/timeseries/timeseries.py
@@ -28,6 +28,14 @@ def get_timeseries(timeseries_id: int, db: Session = Depends(get_db)):
         raise HTTPException(status_code=404, detail="Timeseries not found.")
     return db_timeseries
 
+#get all metadata of one timeseries (given its ID)
+@router.get('/timeseries/id/{timeseries_id}', response_model=schemas.Timeseries)
+def get_timeseries_by_id(timeseries_id: int, db: Session = Depends(get_db)):
+    db_timeseries = crud.get_timeseries(db, timeseries_id=timeseries_id)
+    if db_timeseries is None:
+        raise HTTPException(status_code=404, detail="Timeseries not found.")
+    return db_timeseries
+
 #get all metadata of one timeseries (known its unique label)
 @router.get('/timeseries/unique/', response_model=schemas.Timeseries)
 def get_timeseries(station_id: int, variable_id: int, resource_provider: str , label: str='', db: Session = Depends(get_db)):
diff --git a/toardb/toardb.py b/toardb/toardb.py
index 92f6893abf3a89a185b3bcb9ce23782d55fe4815..4b5840e840b96c44ab514687ec08acef16d1fa9e 100644
--- a/toardb/toardb.py
+++ b/toardb/toardb.py
@@ -4,6 +4,7 @@ Simple test API for variable management
 
 from typing import List
 from fastapi import FastAPI, Depends, HTTPException, APIRouter
+from fastapi.responses import JSONResponse
 from sqlalchemy.orm import Session
 
 from toardb.utils.database import ToarDbSession, engine
@@ -33,6 +34,8 @@ async def info():
         "Measurement Methods": settings.MM_vocab,
         "Climatic Zones": settings.CZ_vocab,
         "Coordinate Validity": settings.CV_vocab,
+        "Countries": settings.CN_vocab,
+        "Timezones": settings.TZ_vocab,
         "Station Types": settings.ST_vocab,
         "Station Type of Area": settings.TA_vocab,
         "Station TOAR Categories": settings.TC_vocab,
@@ -47,27 +50,34 @@ async def info():
 @app.get("/controlled_vocabulary/{name}")
 async def info(name: str):
     controlled_vocabulary = {
-        "Role Codes": settings.RC_vocab,
-        "Role Status": settings.RS_vocab,
-        "Kind of Annotations": settings.AK_vocab,
-        "Kind of Organizations": settings.OK_vocab,
-        "Data Access Rights": settings.DA_vocab,
-        "Sampling Frequencies": settings.SF_vocab,
-        "Aggregation Types": settings.AT_vocab,
-        "Data Sources": settings.DS_vocab,
-        "Measurement Methods": settings.MM_vocab,
-        "Climatic Zones": settings.CZ_vocab,
-        "Coordinate Validity": settings.CV_vocab,
-        "Station Types": settings.ST_vocab,
-        "Station Type of Area": settings.TA_vocab,
-        "Station TOAR Categories": settings.TC_vocab,
-        "Station HTAP Regions": settings.TR_vocab,
-        "Station Dominant Landcover Types": settings.DL_vocab,
-        "Result Types": settings.RT_vocab,
-        "Data Flags": settings.DF_vocab,
-        "Type of Change": settings.CL_vocab,
+        "role codes": settings.RC_vocab,
+        "role status": settings.RS_vocab,
+        "kind of ANnotations": settings.AK_vocab,
+        "kind of organizations": settings.OK_vocab,
+        "data access rights": settings.DA_vocab,
+        "sampling frequencies": settings.SF_vocab,
+        "aggregation types": settings.AT_vocab,
+        "data sources": settings.DS_vocab,
+        "measurement methods": settings.MM_vocab,
+        "climatic zones": settings.CZ_vocab,
+        "coordinate validity": settings.CV_vocab,
+        "countries": settings.CN_vocab,
+        "timezones": settings.TZ_vocab,
+        "station types": settings.ST_vocab,
+        "station type of Area": settings.TA_vocab,
+        "station tOAR Categories": settings.TC_vocab,
+        "station hTAP Regions": settings.TR_vocab,
+        "station dominant Landcover Types": settings.DL_vocab,
+        "result types": settings.RT_vocab,
+        "data flags": settings.DF_vocab,
+        "type of change": settings.CL_vocab,
     }
-    return controlled_vocabulary[name]
+    if name.lower() in controlled_vocabulary:
+        return controlled_vocabulary[name.lower()]
+    else:
+        # unknown vocabulary name: respond with 404 and an explanatory message
+        message = f"No controlled vocabulary found for '{name}'"
+        return JSONResponse(status_code=404, content=message)
 
 db_stats= {
         "users": 358,
@@ -130,6 +140,8 @@ async def startup_event():
     MM_vocabulary = __get_enum_dict(fake_cur, "mm_vocabulary")
     CZ_vocabulary = __get_enum_dict(fake_cur, "cz_vocabulary")
     CV_vocabulary = __get_enum_dict(fake_cur, "cv_vocabulary")
+    CN_vocabulary = __get_enum_dict(fake_cur, "cn_vocabulary")
+    TZ_vocabulary = __get_enum_dict(fake_cur, "tz_vocabulary")
     ST_vocabulary = __get_enum_dict(fake_cur, "st_vocabulary")
     TA_vocabulary = __get_enum_dict(fake_cur, "ta_vocabulary")
     TC_vocabulary = __get_enum_dict(fake_cur, "tc_vocabulary")
@@ -154,6 +166,8 @@ DS_vocabulary = __get_enum_dict(fake_cur, "ds_vocabulary")
 MM_vocabulary = __get_enum_dict(fake_cur, "mm_vocabulary")
 CZ_vocabulary = __get_enum_dict(fake_cur, "cz_vocabulary")
 CV_vocabulary = __get_enum_dict(fake_cur, "cv_vocabulary")
+CN_vocabulary = __get_enum_dict(fake_cur, "cn_vocabulary")
+TZ_vocabulary = __get_enum_dict(fake_cur, "tz_vocabulary")
 ST_vocabulary = __get_enum_dict(fake_cur, "st_vocabulary")
 TA_vocabulary = __get_enum_dict(fake_cur, "ta_vocabulary")
 TC_vocabulary = __get_enum_dict(fake_cur, "tc_vocabulary")
@@ -177,6 +191,8 @@ class Settings(BaseSettings):
     MM_vocab = MM_vocabulary
     CZ_vocab = CZ_vocabulary
     CV_vocab = CV_vocabulary
+    CN_vocab = CN_vocabulary
+    TZ_vocab = TZ_vocabulary
     ST_vocab = ST_vocabulary
     TA_vocab = TA_vocabulary
     TC_vocab = TC_vocabulary
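
A minimal sketch of how the new lookup-by-ID routes and the case-insensitive controlled-vocabulary route could be exercised against a locally running instance. The base URL, the example IDs, and the use of `requests` are assumptions for illustration only and are not part of this change:

    import requests

    BASE = "http://localhost:8000"  # assumed local deployment of the TOAR API

    # fetch all data records of one timeseries via the new /data/id/ route
    data = requests.get(f"{BASE}/data/id/1", params={"format": "json"})
    print(data.status_code, len(data.json()) if data.ok else data.text)

    # fetch station metadata by numeric ID via the new /stationmeta/id/ route
    station = requests.get(f"{BASE}/stationmeta/id/1")
    print(station.status_code, station.json() if station.ok else station.text)

    # controlled vocabulary lookup is now case-insensitive
    vocab = requests.get(f"{BASE}/controlled_vocabulary/Kind of Organizations")
    print(vocab.status_code, vocab.json())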