diff --git a/evaluation_script/__init__.py b/evaluation_script/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f54386693d83294befee949a7b1b42da8d2e58c3
--- /dev/null
+++ b/evaluation_script/__init__.py
@@ -0,0 +1 @@
+from .main import evaluate
diff --git a/evaluation_script/main.py b/evaluation_script/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..5f7cf56d2c5ec348baf049a7bd9cabcc1455c6cd
--- /dev/null
+++ b/evaluation_script/main.py
@@ -0,0 +1,92 @@
+import pandas as pd
+from sklearn.metrics import accuracy_score
+
+
+def evaluate(test_annotation_file, user_submission_file, phase_codename, **kwargs):
+    """
+    Evaluates the submission for a particular challenge phase and returns score
+    Arguments:
+
+        `test_annotations_file`: Path to test_annotation_file on the server
+        `user_submission_file`: Path to file submitted by the user
+        `phase_codename`: Phase to which submission is made
+
+        `**kwargs`: keyword arguments that contains additional submission
+        metadata that challenge hosts can use to send slack notification.
+        You can access the submission metadata
+        with kwargs['submission_metadata']
+
+        Example: the submission metadata can be accessed like this:
+        >>> print(kwargs['submission_metadata'])
+        {
+            'status': u'running',
+            'when_made_public': None,
+            'participant_team': 5,
+            'input_file': 'https://abc.xyz/path/to/submission/file.json',
+            'execution_time': u'123',
+            'publication_url': u'ABC',
+            'challenge_phase': 1,
+            'created_by': u'ABC',
+            'stdout_file': 'https://abc.xyz/path/to/stdout/file.json',
+            'method_name': u'Test',
+            'stderr_file': 'https://abc.xyz/path/to/stderr/file.json',
+            'participant_team_name': u'Test Team',
+            'project_url': u'http://foo.bar',
+            'method_description': u'ABC',
+            'is_public': False,
+            'submission_result_file': 'https://abc.xyz/path/result/file.json',
+            'id': 123,
+            'submitted_at': u'2017-03-20T19:22:03.880652Z'
+        }
+    """
+    output = {}
+
+    target = read_annotation(test_annotation_file)
+    pred = read_annotation(user_submission_file)
+    # Align predictions with the ground-truth labels by image before scoring.
+    pred = pred.reindex(target.index)
+    split = phase_codename
+    print(f"Evaluating for {phase_codename} Phase")
+    output["result"] = [
+        {
+            split: {
+                # Please add your metrics here
+                "accuracy": accuracy_score(target, pred),
+            }
+        }
+    ]
+    # To display the results in the result file
+    output["submission_result"] = output["result"][0][split]
+    print(f"Completed evaluation for {phase_codename} Phase")
+    return output
+
+
+def read_annotation(path):
+    """Read a CSV annotation file and return its labels as a Series indexed by image."""
+    df = pd.read_csv(path)
+    output_cols = ["image", "label"]
+    assert set(output_cols).issubset(set(df.columns)), (
+        f"Annotation file {path} must contain the columns {output_cols}"
+    )
+    # Index by image so ground truth and predictions can be aligned row by row.
+    return df.set_index("image")["label"]
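+
+
+if __name__ == "__main__":
+    # Minimal local smoke test (illustrative sketch only; the EvalAI worker
+    # imports evaluate() directly, so this block never runs on the server).
+    # It writes two tiny CSV files with the assumed `image`/`label` schema
+    # and scores a submission that gets two of three labels right.
+    import os
+    import tempfile
+
+    tmp_dir = tempfile.mkdtemp()
+    gt_path = os.path.join(tmp_dir, "test_annotations.csv")
+    sub_path = os.path.join(tmp_dir, "sample_submission.csv")
+    pd.DataFrame(
+        {"image": ["a.jpg", "b.jpg", "c.jpg"], "label": [0, 1, 1]}
+    ).to_csv(gt_path, index=False)
+    pd.DataFrame(
+        {"image": ["a.jpg", "b.jpg", "c.jpg"], "label": [0, 1, 0]}
+    ).to_csv(sub_path, index=False)
+    print(evaluate(gt_path, sub_path, "dev"))  # expected accuracy: 2/3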