diff --git a/measures.py b/measures.py
new file mode 100644
index 0000000000000000000000000000000000000000..5d76e14dbc943031e20844d96adfd9b85cd1db90
--- /dev/null
+++ b/measures.py
@@ -0,0 +1,104 @@
+from sklearn.metrics import confusion_matrix
+import pandas as pd
+
def get_confusion_matrix(y_true, y_predicted):
    """Flatten the sklearn confusion matrix into a 1-D array.

    For binary labels sklearn orders rows by true class and columns by
    predicted class, so the ravel'd result is (TN, FP, FN, TP).
    """
    matrix = confusion_matrix(y_true, y_predicted)
    return matrix.ravel()
+
+
+
def get_measure_values(TN, FP, FN, TP):
    '''
    Compute every quality measure from the confusion-matrix counts and
    arrange the results in the preferred reporting order, together with
    a labelled confusion matrix as a DataFrame.
    '''
    labels = ['Accuracy','Precision','FM','F1','TS','Recall','GM','BA','Specifity','TS*','F1*','FM*','NPV']
    # One value per label, in the same order as `labels`.
    values = [
        accuracy(TP, TN, FP, FN),
        precision(TP, FP),
        FM(TP, FP, FN),
        F1(TP, FP, FN),
        TS(TP, FN, FP),
        recall(TP, FN),
        GM(TP, FN, TN, FP),
        BA(TP, FP, TN, FN),
        specifity(TN, FP),
        negTS(TN, FN, FP),
        negF1(TN, FP, FN),
        negFM(TN, FP, FN),
        NPV(TN, FN),
    ]
    cm = pd.DataFrame(
        data={'Predicted Positive': [TP, FP], 'Predicted Negative': [FN, TN]},
        index=['Actual Positive', 'Actual Negative'],
    )
    return labels, values, cm
+
+
+
+'''
+The quality measures used for the IGQRs. The value -1 encodes invalid values due to division by zero.
+'''
+
+#accuracy
+def accuracy(TP,TN,FP,FN):
+    return (TP+TN)/(TP+TN+FP+FN)
+
+
+#recall, TPR, sensitivity
+def recall(TP,FN):
+    if TP+FN==0:
+        return -1
+    return TP/(TP+FN)
+
+#specifity, selectivity, TNR
+def specifity(TN,FP):
+    if TN+FP==0:
+        return -1
+    return TN/(TN+FP)
+
+#precision, positive predictive value (PPV)
+def precision(TP,FP):
+    if TP+FP==0:
+        return -1
+    return TP/(TP+FP)
+
+#negative predictive value (NPV)
+def NPV(TN,FN):
+    if TN+FN==0:
+        return -1
+    return TN/(TN+FN)
+
+
+#threat score (TS) 
+def TS(TP,FN,FP):
+    if TP+FN+FP==0:
+        return -1
+    return TP/(TP+FN+FP)
+
def negTS(TN, FN, FP):
    """Threat score (Jaccard) of the negative class, -1 when undefined."""
    denominator = TN + FN + FP
    if denominator == 0:
        return -1
    return TN / denominator
+
+
+#balanced accuracy (BA)
+def BA(TP,FP,TN,FN):
+    if specifity(TN,FP)==-1:
+        return -1
+    return (recall(TP,FN)+specifity(TN,FP))/2
+
+#F1 Score
+def F1(TP,FP,FN):
+    if 2*TP+FP+FN==0:
+        return -1
+    return 2*TP/(2*TP+FP+FN)
+
def negF1(TN, FP, FN):
    '''
    F1 score of the negative class: harmonic mean of NPV and specifity.
    Returns -1 when either component is undefined or their sum is zero.
    Bug fix: the original only checked NPV+specifity == 0, so an
    undefined component (-1) leaked through — e.g. NPV == -1 with
    specifity == 0.5 returned 2.0, an impossible F1 value. The guard now
    mirrors negFM's component checks.
    '''
    npv = NPV(TN, FN)
    spec = specifity(TN, FP)
    if npv == -1 or spec == -1 or npv + spec == 0:
        return -1
    return (2 * npv * spec) / (npv + spec)
+
+
+#fowlkes-mallows index (FM)
+def FM(TP,FP,FN):
+    if TP+FP==0 or TP+FN==0:
+        return -1
+    return ((TP/(TP+FP)*TP/(TP+FN))**0.5)
+
def negFM(TN, FP, FN):
    """Fowlkes-Mallows index of the negative class: sqrt(NPV * specifity).

    Returns -1 when either component is undefined.
    """
    npv = NPV(TN, FN)
    spec = specifity(TN, FP)
    if npv == -1 or spec == -1:
        return -1
    return (npv * spec) ** 0.5
+
+
+#geomeasure mean (of recall and specifity)
+def GM(TP,FN,TN,FP):
+    if specifity(TN, FP) ==-1:
+        return -1
+    return (recall(TP, FN)*specifity(TN, FP))**0.5
+