"keyword","repo_name","file_path","file_extension","file_size","line_count","content","language"
"Biochemistry","Bin-Cao/TCLRmodel","Template/Execution template/template.py",".py","5138","104","#coding=utf-8
from TCLR import TCLRalgorithm as model


""""""
    :param correlation : {'PearsonR(+)','PearsonR(-)','MIC','R2'}, default PearsonR(+).
            Methods:
            * PearsonR (+)/(-): for linear relationships.
            * MIC: for non-linear relationships.
            * R2: for non-linear relationships.

    :param tolerance_list: constraints imposed on features, default is null.
            A two-dimensional list, viz., [[constraint_1,tol_1],[constraint_2,tol_2],...];
            constraint_1, constraint_2 (string) are feature names;
            tol_1, tol_2 (float) are the features' tolerance ratios;
            the relative variation range of each feature must stay within its tolerance;
            example: tolerance_list = [['feature_name1',0.2],['feature_name2',0.1]].
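            e.g., if 'feature_name1' spans [0, 10] in the full dataset and tol_1 = 0.2,
            its values on any leaf may span a range of at most 2.0.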
    
    :param gpl_dummyfea: dummy features in gplearn regression, default is null.
            A one-dimensional list, viz., ['feature_name1','feature_name2',...];
            dummy features 'feature_name1','feature_name2',... are excluded from the gplearn regression.
            
    :param minsize : an integer (default=3), the minimum number of unique values of the linear feature on each leaf.
    
    :param threshold : a float less than or equal to 1 (default=0.95 for PearsonR).
            The smallest correlation index allowed when dividing the dataset.
            To avoid overfitting, threshold = 0.5 is suggested for MIC.
    
    :param mininc : Minimum expected gain of objective function (default=0.01)
    
    :param split_tol : a float (default=0.8); along the split path, the value range of each constrained feature must be narrowed to at most this ratio of its original range.

    :param gplearn : whether to call the gplearn package embedded in TCLR to regress a formula (default=False).
    
    :param population_size : integer, optional (default=500), the number of programs in each generation.
    
    :param generations : integer, optional (default=100), the number of generations to evolve.

    :param verbose : int, optional (default=1). Controls the verbosity of the evolution building process.
    
    :param metric : str, optional (default='mean absolute error')
            The name of the raw fitness metric. Available options include:
            - 'mean absolute error'.
            - 'mse' for mean squared error.
            - 'rmse' for root mean squared error.
            - 'pearson', for Pearson's product-moment correlation coefficient.
            - 'spearman' for Spearman's rank-order correlation coefficient.
    
    :param function_set : iterable, optional (default=['add', 'sub', 'mul', 'div', 'log', 'sqrt', 
                                               'abs', 'neg','inv','sin','cos','tan', 'max', 'min'])
            The functions to use when building and evolving programs. This iterable can include
            strings indicating the individual functions outlined below.
            Available individual functions are:
            - 'add' : addition, arity=2.
            - 'sub' : subtraction, arity=2.
            - 'mul' : multiplication, arity=2.
            - 'div' : protected division where a denominator near-zero returns 1.,
                arity=2.
            - 'sqrt' : protected square root where the absolute value of the
                argument is used, arity=1.
            - 'log' : protected log where the absolute value of the argument is
                used and a near-zero argument returns 0., arity=1.
            - 'abs' : absolute value, arity=1.
            - 'neg' : negative, arity=1.
            - 'inv' : protected inverse where a near-zero argument returns 0.,
                arity=1.
            - 'max' : maximum, arity=2.
            - 'min' : minimum, arity=2.
            - 'sin' : sine (radians), arity=1.
            - 'cos' : cosine (radians), arity=1.
            - 'tan' : tangent (radians), arity=1.

    Algorithm Patent No. : 2021SR1951267, China
    Reference : Domain knowledge guided interpretive machine learning ——  Formula discovery for the oxidation behavior of Ferritic-Martensitic steels in supercritical water. Bin Cao et al., 2022, JMI, journal paper.
    DOI : 10.20517/jmi.2022.04
""""""


dataSet = ""testdata.csv""
correlation = 'PearsonR(+)'
tolerance_list = [
    ['E_Cr_split_feature_1',0.001],
]

gpl_dummyfea = ['ln(t)_split_feature_4',]
minsize = 3
threshold = 0.9
mininc = 0.01
split_tol = 0.8
gplearn = True
population_size = 500
generations = 100
verbose = 1 
metric = 'mean absolute error'
function_set = ['add', 'sub', 'mul', 'div', 'log', 'sqrt', 'abs', 'neg','inv','sin','cos','tan', 'max', 'min']


model.start(filePath = dataSet, correlation = correlation, tolerance_list = tolerance_list, gpl_dummyfea = gpl_dummyfea, minsize = minsize, threshold = threshold,
            mininc = mininc ,split_tol = split_tol, gplearn = gplearn,  population_size = population_size,
            generations = generations,verbose = verbose, metric =metric, function_set =function_set)



","Python"
"Biochemistry","Bin-Cao/TCLRmodel","TCLR/TCLRalgorithm.py",".py","38792","861","""""""
    Tree Classifier for Linear Regression (TCLR) 
    Author : Bin CAO (binjacobcao@gmail.com) 

    TCLR is a new tree model proposed by Professor T-Y Zhang and Mr. Bin Cao et al. for capturing the functional relationships 
    between features and target variables. The model partitions the feature space into a set of rectangles, with each partition
    embodying a specific function. This approach is conceptually simple, yet powerful for distinguishing mechanisms. The entire
    feature space is divided into disjointed unit intervals by hyperplanes parallel to the coordinate axes. Within each partition,
    the target variable y is modeled as a linear function of a feature xj (j = 1,⋯,m), which is the linear function used in our studied problem.
    
    Patent No. : 2021SR1951267, China
    Reference : Domain knowledge guided interpretive machine learning ——  Formula discovery for the oxidation behavior of Ferritic-Martensitic steels in supercritical water. Bin Cao et al., 2022, JMI, journal paper.
    DOI : 10.20517/jmi.2022.04
""""""

import math
import re
import time
import copy
import os
import warnings
import random
from typing import List
import numpy as np
import pandas as pd
from graphviz import Digraph
from scipy import stats
from gplearn import genetic
from minepy import MINE


# Define the basic structure of a Tree Model - Node
class Node:
    def __init__(self, data):
        self.data = data
        self.lc = None
        self.rc = None
        self.slope = None
        self.intercept = None
        self.size = data.shape[0]
        self.R = 0
        self.bestFeature = 0
        self.bestValue = 0
        self.leaf_no = -1


def start(filePath, correlation='PearsonR(+)', minsize=3, threshold=0.95, mininc=0.01, split_tol = 0.8, epochs = 5,random_seed=42, Generate_Features = True, tolerance_list = None , weight=True,
         gplearn = False, gpl_dummyfea = None, population_size = 500, generations = 100, verbose = 1, 
         metric = 'mean absolute error',
         function_set = ['add', 'sub', 'mul', 'div', 'log', 'sqrt', 'abs', 'neg','inv','sin','cos','tan', 'max', 'min']):
    
    """"""
    Tree Classifier for Linear Regression (TCLR) 

    TCLR is a new tree model proposed by Professor T-Y Zhang and Mr. Bin Cao et al. for capturing the functional relationships 
    between features and target variables. The model partitions the feature space into a set of rectangles, with each partition
    embodying a specific function. This approach is conceptually simple, yet powerful for distinguishing mechanisms. The entire
    feature space is divided into disjointed unit intervals by hyperplanes parallel to the coordinate axes. Within each partition,
    the target variable y is modeled as a linear function of a feature xj (j = 1,⋯,m), which is the linear function used in our studied problem.
    
    Patent No. : 2021SR1951267, China
    Reference : Domain knowledge guided interpretive machine learning ——  Formula discovery for the oxidation behavior of Ferritic-Martensitic steels in supercritical water. Bin Cao et al., 2022, JMI, journal paper.
    DOI : 10.20517/jmi.2022.04

    :param correlation : {'PearsonR(+)','PearsonR(-)','MIC','R2'}, default PearsonR(+).
            Methods:
            * PearsonR (+)/(-): for linear relationships.
            * MIC: for non-linear relationships.
            * R2: for non-linear relationships.

            The evaluation metric for capturing the functional relationship between feature and response:
            1>
            PearsonR:
            Pearson correlation coefficient, also known as Pearson's r, the Pearson product-moment correlation coefficient.
            PearsonR is a measure of linear correlation between two sets of data. 
            PearsonR = Cov(X,Y) / (sigmaX * sigmaY)
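            For example, X = [1,2,3] and Y = [2,4,6] give PearsonR = +1 (a perfect
            positive linear relation), while Y = [6,4,2] gives PearsonR = -1.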
        
            2>
            MIC:
            The maximal information coefficient (MIC). MIC captures a wide range of associations both functional and not, 
            and for functional relationships provides a score that roughly equals the coefficient of determination (R2) of 
            the data relative to the regression function.  MIC belongs to a larger class of maximal information-based 
            nonparametric exploration (MINE) statistics for identifying and classifying relationships.  
            Reference : Reshef, D. N., Reshef, Y. A., Finucane, H. K., Grossman, S. R., McVean, G., Turnbaugh, P. J., ... 
            and Sabeti, P. C. (2011). Detecting novel associations in large data sets. science, 334(6062), 1518-1524.
            
            3>
            R2:
            In statistics, the coefficient of determination, denoted R2 or r2 and pronounced ""R squared"",
            is the proportion of the variation in the dependent variable that is predictable from the independent variable(s).
            It is a statistic used in the context of statistical models whose main purpose is either the prediction of future
            outcomes or the testing of hypotheses on the basis of other related information. It provides a measure of how well
            observed outcomes are replicated by the model, based on the proportion of total variation of outcomes explained by the model.
            Definition from Wikipedia : https://en.wikipedia.org/wiki/Coefficient_of_determination
            R2 = 1 - SSres / SStot. Its value may be negative for poor correlations.
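            For example, SSres = 2 and SStot = 10 give R2 = 1 - 2/10 = 0.8.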
 
  
    :param minsize : 
            an integer (default=3), the minimum number of unique values of the linear feature on each leaf.
    
    :param threshold : 
            a float less than or equal to 1 (default=0.95 for PearsonR).
            The smallest correlation index allowed when dividing the dataset.
            To avoid overfitting, threshold = 0.5 is suggested for MIC.
    
    :param mininc : Minimum expected gain of objective function (default=0.01)

    :param split_tol : a float (default=0.8); along the split path, the value range of each constrained feature must be narrowed to at most this ratio of its original range.

    :param epochs : an integer (default=5), see parameter Generate_Features (below)

    :param random_seed :  an integer (default=42), see parameter Generate_Features (below)

    :param Generate_Features : bool (default=True). When Generate_Features = True, TCLR generates new features by applying
        the operators ['+','-','*'] to the original features, iterating [param : epochs] times and generating a batch of
        new features each time (3 per iteration, or 1 for datasets with at most 4 rows). [param : random_seed]
        controls the randomness.
        When Generate_Features = False, TCLR uses the original features.

    :param tolerance_list: 
            constraints imposed on features, default is null.
            A two-dimensional list, viz., [['feature_name1',tol_1],['feature_name2',tol_2],...];
            'feature_name1', 'feature_name2' (string) are names of input features;
            tol_1, tol_2 (float, between 0 and 1) are the features' tolerance ratios;
            the variation of feature values on each leaf must stay within the tolerance;
            if tol_1 = 0, the value of feature 'feature_name1' must be constant on each leaf;
            if tol_1 = 1, there is no constraint on the value of feature 'feature_name1';
            example: tolerance_list = [['feature_name1',0.2],['feature_name2',0.1]].
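            e.g., if 'feature_name1' spans [0, 10] in the full dataset and tol_1 = 0.2,
            its values on any leaf may span a range of at most 2.0.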

    :param weight:
            The weight of the gain function, default is True.
            When weight is True: linear_gain = R(father node) - ( W_l * R(left child node) + W_r * R(right child node) )
            where W_l is the ratio of the number of samples in the left child node to the total number of samples and
            W_r is the ratio of the number of samples in the right child node to the total number of samples.
            When weight is False: linear_gain = R(father node) - ( R(left child node) + R(right child node) ) / 2
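            e.g., 30 samples in the left child and 10 in the right give W_l = 0.75 and W_r = 0.25.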

    :param gplearn : Whether to call the embedded gplearn package of TCLR to regress formula (default=False).
    
    :param gpl_dummyfea: 
            dummy features in gplearn regression, default is null.
            A one-dimensional list, viz., ['feature_name1','feature_name2',...];
            dummy features 'feature_name1','feature_name2',... are excluded from the gplearn regression.

    :param population_size : integer, optional (default=500), the number of programs in each generation.
    
    :param generations : integer, optional (default=100), the number of generations to evolve.

    :param verbose : int, optional (default=1). Controls the verbosity of the evolution building process.
    
    :param metric : 
            str, optional (default='mean absolute error')
            The name of the raw fitness metric. Available options include:
            - 'mean absolute error'.
            - 'mse' for mean squared error.
            - 'rmse' for root mean squared error.
            - 'pearson', for Pearson's product-moment correlation coefficient.
            - 'spearman' for Spearman's rank-order correlation coefficient.
    
    :param function_set : 
            iterable, optional (default=['add', 'sub', 'mul', 'div', 'log', 'sqrt', 
                                               'abs', 'neg','inv','sin','cos','tan', 'max', 'min'])
            The functions to use when building and evolving programs. This iterable can include
            strings indicating the individual functions outlined below.
            Available individual functions are:
            - 'add' : addition, arity=2.
            - 'sub' : subtraction, arity=2.
            - 'mul' : multiplication, arity=2.
            - 'div' : protected division where a denominator near-zero returns 1.,
                arity=2.
            - 'sqrt' : protected square root where the absolute value of the
                argument is used, arity=1.
            - 'log' : protected log where the absolute value of the argument is
                used and a near-zero argument returns 0., arity=1.
            - 'abs' : absolute value, arity=1.
            - 'neg' : negative, arity=1.
            - 'inv' : protected inverse where a near-zero argument returns 0.,
                arity=1.
            - 'max' : maximum, arity=2.
            - 'min' : minimum, arity=2.
            - 'sin' : sine (radians), arity=1.
            - 'cos' : cosine (radians), arity=1.
            - 'tan' : tangent (radians), arity=1.
    
    Example :
    #coding=utf-8
    from TCLR import TCLRalgorithm as model

    dataSet = ""testdata.csv""
    correlation = 'PearsonR(+)'
    minsize = 3
    threshold = 0.9
    mininc = 0.01
    split_tol = 0.8

    model.start(filePath = dataSet, correlation = correlation, minsize = minsize, threshold = threshold,
                mininc = mininc ,split_tol = split_tol,)
    """"""

    os.makedirs('Segmented', exist_ok=True)
    # global row counter used when writing results into all_dataset.csv
    global record
    record = 0
    timename = time.localtime(time.time())
    namey, nameM, named, nameh, namem = timename.tm_year, timename.tm_mon, timename.tm_mday, timename.tm_hour, timename.tm_min

    read_csvData = pd.read_csv(filePath)

    input_csvData = read_csvData.iloc[:,:-2]
    if Generate_Features == True:
        # choose an appropriate batch size
        if len(input_csvData) - 1 <= 3:
            batch = 1
        else:
            batch = 3
        # generate new dataset
        for epoch in range(epochs):
            # for increasing the randomness
            random_seed += 1
            input_csvData = generate_random_features(input_csvData,[column for column in input_csvData],batch,random_seed)
           
        input_csvData = input_csvData.assign(linear_X=read_csvData.iloc[:,-2])
        csvData = input_csvData.assign(linear_Y=read_csvData.iloc[:,-1])

    else:
        csvData = read_csvData
    
    
    copy_csvData = copy.deepcopy(csvData)
    copy_csvData['slope'] = None
    copy_csvData['intercept'] = None
    copy_csvData[correlation] = None
    copy_csvData.to_csv('Segmented/all_dataset.csv', index=False)

    feats = [column for column in csvData]
    csvData = np.array(csvData)
    root, _ = createTree(csvData, csvData, feats, 0, correlation,tolerance_list, minsize, threshold, mininc, split_tol,weight)
    
    print('All non-image results have been successfully saved!')
    print('#'*80,'\n')
   
    # execute gplearn
    if gplearn == True :
        if correlation == 'MIC' or correlation == 'R2':
            print('{name} is a non-linear correlation metric'.format(name = correlation ))
            print('Linear slopes can only be generated when PearsonR is chosen')
        elif correlation == 'PearsonR(+)' or correlation == 'PearsonR(-)':
            sr_data = pd.read_csv('Segmented/all_dataset.csv')
            sr_featurname = sr_data.columns
            sr_data = np.array(sr_data)

            if gpl_dummyfea == None: 

                gpmodel = genetic.SymbolicRegressor(
                    population_size = population_size, generations = generations, 
                    verbose = verbose,feature_names = sr_featurname[:-4],function_set = function_set,
                    metric = metric
                    )
                formula = gpmodel.fit(sr_data[:,:-4], sr_data[:,-3])
                score = gpmodel.score(sr_data[:,:-4], sr_data[:,-3])
                print( 'slope = ' + str(formula))

            else:
                # fea_num --> fea_loc
                dummyfea = []
                for i in range(len(gpl_dummyfea)):
                    index = feats.index(gpl_dummyfea[i])
                    dummyfea.append(index)
                # remove fea_loc
                index_array = [i for i in range(len(sr_featurname)-4)]
                for i in range(len(gpl_dummyfea)):
                    index_array.remove(dummyfea[i])
                
                gpmodel = genetic.SymbolicRegressor(
                    population_size = population_size, generations = generations, 
                    verbose = verbose,feature_names = sr_featurname[index_array],function_set = function_set,
                    metric = metric
                    )
                formula = gpmodel.fit(sr_data[:,index_array], sr_data[:,-3])
                score = gpmodel.score(sr_data[:,index_array], sr_data[:,-3])
                print( 'slope = ' + str(formula))

          
            with open(os.path.join('Segmented', 'A_formula derived by gplearn.txt'), 'w') as wfid:
                    print('Formula : ', file=wfid)
                    print(str(formula), file=wfid)
                    print('Fitness : ', file=wfid)
                    print(str(metric) + ' = ' + str(score), file=wfid)
                    print('\n', file=wfid)
                    print('#'*80, file=wfid)
                    print('Symbols annotation:', file=wfid)
                    print('- add : addition, arity=2.', file=wfid)
                    print('- sub : subtraction, arity=2.', file=wfid) 
                    print('- mul : multiplication, arity=2.', file=wfid) 
                    print('- div : protected division where a denominator near-zero returns 1.', file=wfid) 
                    print('- sqrt : protected square root where the absolute value of the argument is used.', file=wfid) 
                    print('- log : protected log where the absolute value of the argument is used.', file=wfid) 
                    print('- abs : absolute value, arity=1.', file=wfid) 
                    print('- neg : negative, arity=1.', file=wfid) 
                    print('- inv : protected inverse where a near-zero argument returns 0.', file=wfid)  
                    print('- max : maximum, arity=2.', file=wfid) 
                    print('- sin : sine (radians), arity=1.', file=wfid) 
                    print('- cos : cosine (radians), arity=1.', file=wfid)
                    print('- tan : tangent (radians), arity=1.', file=wfid)  
                 
                        
            
    

    try:
        # generate figure in pdf
        warnings.filterwarnings('ignore')
        dot = Digraph(comment='Result of TCLR')
        render('A', root, dot, feats)
        dot.render(
            'Result of TCLR {year}.{month}.{day}-{hour}.{minute}'.format(year=namey, month=nameM, day=named, hour=nameh,
                                                                        minute=namem))
        return True
    except Exception:
        print('Cannot generate the Tree plot!')
        print('Please ensure that the executable files of Graphviz are present on your system.')
        print('See : https://github.com/Bin-Cao/TCLRmodel/tree/main/User%20Guide')
        return True 
  


# Captures the functional relationships between features and the target;
# partitions the feature space into a set of rectangles.
def createTree(dataSet, ori_dataset,feats, leaf_no, correlation,tolerance_list, minsize, threshold, mininc,split_tol,weight):
    # It is a positive linear relationship
    if correlation == 'PearsonR(+)':
        node = Node(dataSet)
        # Initial R0
        bestR = PearsonR(dataSet[:, -2], dataSet[:, -1])
        node.R = bestR
        __slope = stats.linregress(dataSet[:, -2], dataSet[:, -1])[0]
        node.slope = __slope
        node.intercept = stats.linregress(dataSet[:, -2], dataSet[:, -1])[1]
        if bestR >= threshold and fea_tol(dataSet,ori_dataset,feats,tolerance_list) == True:
            node.leaf_no = leaf_no
            leaf_no += 1
            write_csv(node, feats, True, correlation)
            return node, leaf_no
        # Exclude the last two columns of dataSet: the linear feature of interest and the response
        numFeatures = len(dataSet[0]) - 2
        splitSuccess = False
        bestFeature = -1
        bestValue = 0

        check_valve = False
        for i in range(numFeatures):
            featList = [example[i] for example in dataSet]
            uniqueVals = sorted(list(set(featList)))
            for value in range(len(uniqueVals) - 1):
                # constraints imposed on features  (greater tolerance in split process)
                if not fea_tol_split(dataSet,ori_dataset,feats,tolerance_list,split_tol):
                    continue
                subDataSetA, subDataSetB = splitDataSet(dataSet, i, uniqueVals[value])
                
                if np.unique(subDataSetA[:, -2]).size <= minsize - 1 or np.unique(
                        subDataSetB[:, -2]).size <= minsize - 1:
                    continue
                
                R = weight_gain(subDataSetA,subDataSetB,weight,0)

                if R - bestR >= mininc:
                    check_valve = True
                    splitSuccess = True
                    bestR = R
                    lc = subDataSetA
                    rc = subDataSetB
                    bestFeature = i
                    bestValue = uniqueVals[value]
                
        if check_valve == False:
            for i in range(numFeatures):
                featList = [example[i] for example in dataSet]
                uniqueVals = sorted(list(set(featList)))
                for value in range(len(uniqueVals) - 1):
                    subDataSetA, subDataSetB = splitDataSet(dataSet, i, uniqueVals[value])
                    
                    if np.unique(subDataSetA[:, -2]).size <= minsize - 1 or np.unique(
                            subDataSetB[:, -2]).size <= minsize - 1:
                        continue

                    R = weight_gain(subDataSetA,subDataSetB,weight,0)

                    if R - bestR >= mininc:
                        splitSuccess = True
                        bestR = R
                        lc = subDataSetA
                        rc = subDataSetB
                        bestFeature = i
                        bestValue = uniqueVals[value]
                        

        # Recursion stops here when no division can increase the factor (R, MIC, R2) by mininc or more.
        if splitSuccess:
            node.lc, leaf_no = createTree(lc, ori_dataset,feats, leaf_no, correlation, tolerance_list,minsize, threshold, mininc,split_tol,weight)
            node.rc, leaf_no = createTree(rc, ori_dataset,feats, leaf_no, correlation,tolerance_list, minsize, threshold, mininc,split_tol,weight)
            node.bestFeature, node.bestValue = bestFeature, bestValue

        # This node is leaf
        if node.lc is None:
            node.leaf_no = leaf_no
            leaf_no += 1
            # determine if this node is to save in all_dataset.csv
            save_in_all = False
            if node.R >= threshold and fea_tol(node.data,ori_dataset,feats,tolerance_list) == True:
                save_in_all = True 
            write_csv(node, feats, save_in_all, correlation)

        return node, leaf_no

    # It is a negative linear relationship
    elif correlation == 'PearsonR(-)':
        node = Node(dataSet)
        bestR = PearsonR(dataSet[:, -2], dataSet[:, -1])
        node.R = bestR
        __slope = stats.linregress(dataSet[:, -2], dataSet[:, -1])[0]
        node.slope = __slope
        node.intercept = stats.linregress(dataSet[:, -2], dataSet[:, -1])[1]
        if bestR <= -threshold and fea_tol(dataSet,ori_dataset,feats,tolerance_list) == True:
            node.leaf_no = leaf_no
            leaf_no += 1
            write_csv(node, feats, True, correlation)
            return node, leaf_no

        numFeatures = len(dataSet[0]) - 2
        splitSuccess = False
        bestFeature = -1
        bestValue = 0

        check_valve = False
        for i in range(numFeatures):
            featList = [example[i] for example in dataSet]
            uniqueVals = sorted(list(set(featList)))
            for value in range(len(uniqueVals) - 1):
                # constraints imposed on features  (greater tolerance in split process)
                if not fea_tol_split(dataSet,ori_dataset,feats,tolerance_list,split_tol):
                    continue
                subDataSetA, subDataSetB = splitDataSet(dataSet, i, uniqueVals[value])

                if np.unique(subDataSetA[:, -2]).size <= minsize - 1 or np.unique(
                        subDataSetB[:, -2]).size <= minsize - 1:
                    continue

                
                R = weight_gain(subDataSetA,subDataSetB,weight,0)

                if R - bestR <= - mininc:
                    check_valve = True
                    splitSuccess = True
                    bestR = R
                    lc = subDataSetA
                    rc = subDataSetB
                    bestFeature = i
                    bestValue = uniqueVals[value]

        if check_valve == False:
            for i in range(numFeatures):
                featList = [example[i] for example in dataSet]
                uniqueVals = sorted(list(set(featList)))
                for value in range(len(uniqueVals) - 1):
                    subDataSetA, subDataSetB = splitDataSet(dataSet, i, uniqueVals[value])

                    if np.unique(subDataSetA[:, -2]).size <= minsize - 1 or np.unique(
                            subDataSetB[:, -2]).size <= minsize - 1:
                        continue
                    
                    R = weight_gain(subDataSetA,subDataSetB,weight,0)

                    if R - bestR <= - mininc:
                        splitSuccess = True
                        bestR = R
                        lc = subDataSetA
                        rc = subDataSetB
                        bestFeature = i
                        bestValue = uniqueVals[value]

        if splitSuccess:
            node.lc, leaf_no = createTree(lc, ori_dataset,feats, leaf_no, correlation,tolerance_list, minsize, threshold, mininc,split_tol,weight)
            node.rc, leaf_no = createTree(rc, ori_dataset,feats, leaf_no, correlation,tolerance_list, minsize, threshold, mininc,split_tol,weight)
            node.bestFeature, node.bestValue = bestFeature, bestValue

        if node.lc is None:
            node.leaf_no = leaf_no
            leaf_no += 1
            # determine if this node is to save in all_dataset.csv
            save_in_all = False
            if node.R >= threshold and fea_tol(node.data,ori_dataset,feats,tolerance_list) == True:
                save_in_all = True 
            write_csv(node, feats, save_in_all, correlation)

        return node, leaf_no

    elif correlation == 'MIC':
        node = Node(dataSet)
        bestR = MIC(dataSet[:, -2], dataSet[:, -1])
        node.R = bestR
        node.slope = None
        if bestR >= threshold and fea_tol(dataSet,ori_dataset,feats,tolerance_list) == True:
            node.leaf_no = leaf_no
            leaf_no += 1
            write_csv(node, feats, True, correlation)
            return node, leaf_no

        numFeatures = len(dataSet[0]) - 2
        splitSuccess = False
        bestFeature = -1
        bestValue = 0

        check_valve = False
        for i in range(numFeatures):
            featList = [example[i] for example in dataSet]
            uniqueVals = sorted(list(set(featList)))
            for value in range(len(uniqueVals) - 1):
                # constraints imposed on features  (greater tolerance in split process)
                if not fea_tol_split(dataSet,ori_dataset,feats,tolerance_list,split_tol):
                    continue
                subDataSetA, subDataSetB = splitDataSet(dataSet, i, uniqueVals[value])
                if np.unique(subDataSetA[:, -2]).size <= minsize - 1 or np.unique(
                        subDataSetB[:, -2]).size <= minsize - 1:
                    continue
                
                R = weight_gain(subDataSetA,subDataSetB,weight,1)

                if R - bestR >= mininc:
                    check_valve = True
                    splitSuccess = True
                    bestR = R
                    lc = subDataSetA
                    rc = subDataSetB
                    bestFeature = i
                    bestValue = uniqueVals[value]

        if check_valve == False:
            for i in range(numFeatures):
                featList = [example[i] for example in dataSet]
                uniqueVals = sorted(list(set(featList)))
                for value in range(len(uniqueVals) - 1):
                    subDataSetA, subDataSetB = splitDataSet(dataSet, i, uniqueVals[value])
                    if np.unique(subDataSetA[:, -2]).size <= minsize - 1 or np.unique(
                            subDataSetB[:, -2]).size <= minsize - 1:
                        continue
    
                    R = weight_gain(subDataSetA,subDataSetB,weight,1)

                    if R - bestR >= mininc:
                        splitSuccess = True
                        bestR = R
                        lc = subDataSetA
                        rc = subDataSetB
                        bestFeature = i
                        bestValue = uniqueVals[value]

        if splitSuccess:
            node.lc, leaf_no = createTree(lc, ori_dataset,feats, leaf_no, correlation,tolerance_list, minsize, threshold, mininc,split_tol,weight)
            node.rc, leaf_no = createTree(rc,ori_dataset, feats, leaf_no, correlation,tolerance_list, minsize, threshold, mininc,split_tol,weight)
            node.bestFeature, node.bestValue = bestFeature, bestValue

        if node.lc is None:
            node.leaf_no = leaf_no
            leaf_no += 1
            # determine if this node is to save in all_dataset.csv
            save_in_all = False
            if node.R >= threshold and fea_tol(node.data,ori_dataset,feats,tolerance_list) == True:
                save_in_all = True 
            write_csv(node, feats, save_in_all, correlation)

        return node, leaf_no

    elif correlation == 'R2':
        node = Node(dataSet)
        bestR = R2(dataSet[:, -2], dataSet[:, -1])
        node.R = bestR
        node.slope = None
        if bestR >= threshold and fea_tol(dataSet,ori_dataset,feats,tolerance_list) == True:
            node.leaf_no = leaf_no
            leaf_no += 1
            write_csv(node, feats, True, correlation)
            return node, leaf_no

        numFeatures = len(dataSet[0]) - 2
        splitSuccess = False
        bestFeature = -1
        bestValue = 0

        check_valve = False
        for i in range(numFeatures):
            featList = [example[i] for example in dataSet]
            uniqueVals = sorted(list(set(featList)))
            for value in range(len(uniqueVals) - 1):
                # constraints imposed on features  (greater tolerance in split process)
                if not fea_tol_split(dataSet,ori_dataset,feats,tolerance_list,split_tol):
                    continue

                subDataSetA, subDataSetB = splitDataSet(dataSet, i, uniqueVals[value])

                if np.unique(subDataSetA[:, -2]).size <= minsize - 1 or np.unique(
                        subDataSetB[:, -2]).size <= minsize - 1:
                    continue

                R = weight_gain(subDataSetA,subDataSetB,weight,2)

                if R - bestR >= mininc:
                    check_valve = True
                    splitSuccess = True
                    bestR = R
                    lc = subDataSetA
                    rc = subDataSetB
                    bestFeature = i
                    bestValue = uniqueVals[value]

        if check_valve == False:
            for i in range(numFeatures):
                featList = [example[i] for example in dataSet]
                uniqueVals = sorted(list(set(featList)))

                for value in range(len(uniqueVals) - 1):
                    subDataSetA, subDataSetB = splitDataSet(dataSet, i, uniqueVals[value])

                    if np.unique(subDataSetA[:, -2]).size <= minsize - 1 or np.unique(
                            subDataSetB[:, -2]).size <= minsize - 1:
                        continue

                    R = weight_gain(subDataSetA,subDataSetB,weight,2)

                    if R - bestR >= mininc:
                        splitSuccess = True
                        bestR = R
                        lc = subDataSetA
                        rc = subDataSetB
                        bestFeature = i
                        bestValue = uniqueVals[value]
        
        if splitSuccess:
            node.lc, leaf_no = createTree(lc, ori_dataset,feats, leaf_no, correlation, tolerance_list,minsize, threshold, mininc,split_tol,weight)
            node.rc, leaf_no = createTree(rc, ori_dataset,feats, leaf_no, correlation,tolerance_list, minsize, threshold, mininc,split_tol,weight)
            node.bestFeature, node.bestValue = bestFeature, bestValue

        if node.lc is None:
            node.leaf_no = leaf_no
            leaf_no += 1
            # determine if this node is to save in all_dataset.csv
            save_in_all = False
            if node.R >= threshold and fea_tol(node.data,ori_dataset,feats,tolerance_list) == True:
                save_in_all = True 
            write_csv(node, feats, save_in_all, correlation)

        return node, leaf_no


def PearsonR(X, Y):
    xBar = np.mean(X)
    yBar = np.mean(Y)
    SSR = 0
    varX = 0
    varY = 0
    if len(X) > 1:
        for i in range(0, len(X)):
            diffXXBar = X[i] - xBar
            diffYYBar = Y[i] - yBar
            SSR += (diffXXBar * diffYYBar)
            varX += diffXXBar ** 2
            varY += diffYYBar ** 2
        SST = math.sqrt(varX * varY)
    else:
        SST = 1
        SSR = 0
    if SST == 0:
        return 0
    return SSR / SST
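
# Illustrative sanity check: PearsonR([1, 2, 3], [2, 4, 6]) == 1.0 (perfect positive
# linear relation) and PearsonR([1, 2, 3], [6, 4, 2]) == -1.0.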


def MIC(X, Y):
    if len(X) > 0:
        mine = MINE(alpha=0.6, c=15)
        mine.compute_score(X, Y)
        return mine.mic()
    else:
        MICs = 0
        return MICs


def R2(X, Y):
    X = np.array(X)
    Y = np.array(Y)
    if len(X) > 0:
        # standard definition: SStot is the total sum of squares of the observed response Y
        SStot = np.sum((Y - np.mean(Y)) ** 2)
        SSres = np.sum((X - Y) ** 2)
        r2 = 1 - SSres / SStot
        return r2
    else:
        r2 = -10
        return r2
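
# Illustrative sanity check (with SStot computed on Y): R2([1, 2, 3], [1, 2, 3]) == 1.0,
# while a poor match such as R2([3, 2, 1], [1, 2, 3]) gives -3.0, i.e. R2 can be negative.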


# Split the DataSet in a specific node
def splitDataSet(dataSet, axis, value):
    retDataSetA = []
    retDataSetB = []
    for featVec in dataSet:
        if featVec[axis] <= value:
            retDataSetA.append(featVec)
        else:
            retDataSetB.append(featVec)
    return np.array(retDataSetA), np.array(retDataSetB)
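
# Example: splitDataSet(data, axis=0, value=5.0) returns the rows with feature 0 <= 5.0
# as the first array and the remaining rows as the second.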

def fea_tol(dataSet,ori_dataSet,feats,tolerance_list):
    # check that every constrained feature's value range on this node stays within its tolerance
    if tolerance_list is None: return True
    record = 0
    for i in range(len(tolerance_list)):
        __feaname = tolerance_list[i][0]
        __tolratio = float(tolerance_list[i][1])
        index = feats.index(__feaname)
        if (dataSet[:,index].max() - dataSet[:,index].min()) / (ori_dataSet[:,index].max()- ori_dataSet[:,index].min()) <= __tolratio:
            record += 1
    return record == len(tolerance_list)

def fea_tol_split(dataSet,ori_dataSet,feats,tolerance_list,split_tol):
    # looser tolerance applied while searching splits: more than half of the
    # constrained features must be narrowed to within max(split_tol, tolerance)
    if tolerance_list is None: return True
    record = 0
    for i in range(len(tolerance_list)):
        __feaname = tolerance_list[i][0]
        __tolratio = float(tolerance_list[i][1])
        criter = max(split_tol,__tolratio)
        index = feats.index(__feaname)
        if (dataSet[:,index].max() - dataSet[:,index].min()) / (ori_dataSet[:,index].max()- ori_dataSet[:,index].min()) <= criter:
            record += 1
    return record > int(0.5*len(tolerance_list))


# Use graphviz to visualize the TCLR
def render(label, node, dot, feats):
    mark = ''
    if node.slope == None:
        mark = ""#="" + str(node.size) + "" , ρ="" + str(round(node.R, 3))
    else:
        mark = ""#="" + str(node.size) + "" , ρ="" + str(round(node.R, 3)) + ' , slope=' + str(
            round(node.slope, 3)) + ' , intercept=' + str(round(node.intercept, 3))

    if node.lc is None:
        mark = 'No_{}, '.format(node.leaf_no) + mark
    dot.node(label, mark)

    if node.lc is not None:
        render(label + 'A', node.lc, dot, feats)
        render(label + 'B', node.rc, dot, feats)
        dot.edge(label, label + 'A', feats[node.bestFeature] + ""≤"" + str(node.bestValue))
        dot.edge(label, label + 'B', feats[node.bestFeature] + "">"" + str(node.bestValue))


def write_csv(node, feats, save_in_all, correlation):
    global record

    frame = {}
    for i in range(len(feats)):
        frame[feats[i]] = node.data[:, i]
    frame = pd.DataFrame(frame)

    # fill in the regression statistics and save this leaf's subset once
    if node.slope is None:
        frame['slope'] = None
        frame['intercept'] = None
    else:
        frame['slope'] = node.slope
        frame['intercept'] = node.intercept
    frame[correlation] = np.repeat(node.R, node.size)
    frame.to_csv('Segmented/subdataset_{}.csv'.format(str(node.leaf_no)))

    if not save_in_all:  # do not save in the all_dataset.csv
        _all_dataset = pd.read_csv('./Segmented/all_dataset.csv')
        _all_dataset.drop(index=range(record, record+node.size), axis=0, inplace=True)
        _all_dataset.to_csv('Segmented/all_dataset.csv', index=False)
        return

    _all_dataset = pd.read_csv('./Segmented/all_dataset.csv')
    for item in range(len(frame.iloc[:, 0])):
        _all_dataset.iloc[item + record, :] = frame.iloc[item, :]
    record += len(frame.iloc[:, 0])
    _all_dataset.to_csv('Segmented/all_dataset.csv', index=False)

# Combine the correlations of the two children into one gain value.
# matrix selects the correlation metric: 0 -> PearsonR, 1 -> MIC, 2 -> R2.
def weight_gain(subDataSetA,subDataSetB,weight,matrix):
    if matrix == 0:
        newRa = PearsonR(subDataSetA[:, -2], subDataSetA[:, -1])
        newRb = PearsonR(subDataSetB[:, -2], subDataSetB[:, -1])
    elif matrix == 1:
        newRa = MIC(subDataSetA[:, -2], subDataSetA[:, -1])
        newRb = MIC(subDataSetB[:, -2], subDataSetB[:, -1])
    elif matrix == 2:
        newRa = R2(subDataSetA[:, -2], subDataSetA[:, -1])
        newRb = R2(subDataSetB[:, -2], subDataSetB[:, -1])
  

    if weight == False:
        R = (newRa + newRb) / 2
        return R
    elif weight == True:
        weightRa = len(subDataSetA[:, -1]) / (len(subDataSetA[:, -1]) + len(subDataSetB[:, -1]))
        weightRb = len(subDataSetB[:, -1]) / (len(subDataSetA[:, -1]) + len(subDataSetB[:, -1]))
        R = weightRa * newRa + weightRb * newRb
        return R
    else:
        print('Parameter error | weight')
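
# Example: for 30 samples in A and 10 in B with PearsonR values 0.9 and 0.8 (matrix=0),
# the weighted value is 0.75*0.9 + 0.25*0.8 = 0.875; unweighted it is (0.9+0.8)/2 = 0.85.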


# code on 2023 May 9, Bin Cao
def generate_random_features(df: pd.DataFrame, 
                            feature_list: List[str],
                            num_combinations: int,
                             random_seed:int) -> pd.DataFrame:
    """"""
    randomly generates new feature combinations.

    :param df: DataFrame containing the original features.
    :param feature_list: List of original features.
    :param num_combinations: Number of combination features to generate.
    :return: DataFrame containing the new features.
    """"""
    new_features = []
    random.seed(random_seed)
    # randomly generates combination features
    for i in range(num_combinations):
        # randomly chooses two features
        f1 = random.choice(feature_list)
        f2 = random.choice(feature_list)
        
        # choose an operator
        op = random.choice(['+', '-', '*',])

        self_op1 = random.choice(['*1', '*2', '*3','*4','**2','**3'])
        self_op2 = random.choice(['*1', '*2', '*3','*4','**2','**3'])
        
        new_f1 = f'{f1} {self_op1}'
        new_f2 = f'{f2} {self_op2}'

        # new feature name
        new_feature = f'({new_f1} {op} {new_f2})'
        
        new_features.append(new_feature)
        
        # calculate the new feature values
        df[new_feature] = eval(f'(df[""{f1}""] {self_op1}) {op} (df[""{f2}""] {self_op2})')
    
    # return the DataFrame with the new columns appended
    return df","Python"
"Biochemistry","Bin-Cao/TCLRmodel","Researches/Note.md",".md","38","2","# Relevant researches applied of TCLR
","Markdown"