diff --git a/src/helpers.py b/src/helpers.py
index ec6aeb5621a473f9f531cdaa3368dea9c4272ee6..f4ac45b2e25156bf8d61990f8e45f09cc0aff4a8 100644
--- a/src/helpers.py
+++ b/src/helpers.py
@@ -14,7 +14,13 @@ def to_list(arg):
     return arg
 
 
-def l_p_loss(power):
+def l_p_loss(power: int):
+    """
+    Calculate the L<p> loss for a given power p. L1 (p=1) is equal to mean absolute error (MAE), L2 (p=2) to mean
+    squared error (MSE), and so on.
+    :param power: exponent p applied to the absolute error of each sample
+    :return: a Keras-compatible loss function computing the mean p-th power of the absolute errors
+    """
     def loss(y_true, y_pred):
         return K.mean(K.pow(K.abs(y_pred - y_true), power), axis=-1)
     return loss
diff --git a/test/test_helpers.py b/test/test_helpers.py
index 163d2682d9b4b856afeb2f425484046ed3cb657f..bc64176e10cac71471ecc78efbcb639bc9fab81f 100644
--- a/test/test_helpers.py
+++ b/test/test_helpers.py
@@ -13,5 +13,9 @@ class TestLoss:
         model = keras.Sequential()
         model.add(keras.layers.Lambda(lambda x: x, input_shape=(None, )))
         model.compile(optimizer=keras.optimizers.Adam(), loss=l_p_loss(2))
-        hist = model.fit(np.array([1, 0]), np.array([1, 1]), epochs=1)
-        assert hist.history['loss'][0] == 0.5
+        hist = model.fit(np.array([1, 0, 2, 0.5]), np.array([1, 1, 0, 0.5]), epochs=1)
+        assert hist.history['loss'][0] == 1.25
+        model.compile(optimizer=keras.optimizers.Adam(), loss=l_p_loss(3))
+        hist = model.fit(np.array([1, 0, -2, 0.5]), np.array([1, 1, 0, 0.5]), epochs=1)
+        assert hist.history['loss'][0] == 2.25
+