# -*- coding: utf-8 -*-
"""
Python code of Spider-Monkey Optimization (SMO)
Coded by: Mukesh Saraswat (emailid: [email protected]), Himanshu Mittal (emailid: [email protected]) and Raju Pal (emailid: [email protected])
The code template is similar to the code at https://github.com/himanshuRepo/CKGSA-in-Python
and the C++ version of SMO at http://smo.scrs.in/
Reference: Jagdish Chand Bansal, Harish Sharma, Shimpi Singh Jadon, and Maurice Clerc. "Spider monkey optimization algorithm for numerical optimization." Memetic computing 6, no. 1, 31-47, 2014.
@link: http://smo.scrs.in/
-- Main.py: Calls the Spider-Monkey Optimization (SMO) Algorithm
   to minimize an objective function
Code compatible:
-- Python: 2.* or 3.*
Code further documented and explained by the repo maintainers:
@nimishbongale
@tanisha0311
"""
import SMO as smo #SMO contains the algo code
import benchmarks #benchmarking functions
import csv #for working with csv files
import numpy #math matrix operations
import time #algo running time
import math #normal math operations
def selector(algo, func_details, popSize, Iter, succ_rate, mean_feval):
    function_name = func_details[0]
    lb = func_details[1]        # lower bound of the search space
    ub = func_details[2]        # upper bound of the search space
    dim = func_details[3]       # problem dimension
    acc_err = func_details[4]   # acceptable error
    obj_val = func_details[5]   # known optimum of the objective function
    # selection of different optimizers; only SMO (algo==0) is available here
    if algo == 0:
        # getattr fetches the benchmark function by name from benchmarks.py
        x, succ_rate, mean_feval = smo.main(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter, acc_err, obj_val, succ_rate, mean_feval)
    else:
        # guard against returning an undefined x for unknown optimizer indices
        raise ValueError("Unknown optimizer index: {}".format(algo))
    return x, succ_rate, mean_feval
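# Illustrative sketch of what benchmarks.getFunctionDetails(0) is expected to
# return, matching the unpacking order in selector() above (the concrete
# values here are an assumption; the real ones live in benchmarks.py):
#   ["F1", -100, 100, 30, 1.0e-5, 0]
#    name   lb   ub  dim  acc_err obj_val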
# Select optimizers
SMO = True  # Code by Himanshu Mittal

# Select benchmark functions; to add more, set F2=True, F3=True, and so on
F1 = True

optimizer = [SMO]       # list of optimizers, for comparison purposes
benchmarkfunc = [F1]    # list of benchmark functions
# Select the number of repetitions for each experiment.
# To obtain meaningful statistics, ~30 independent runs per algorithm are usual.
NumOfRuns = 2

# General parameters for all optimizers (population size, number of iterations).
# Typical settings are a population of ~50 and ~100 iterations.
PopulationSize = 10
Iterations = 5
# Export results? For testing, set Export=False and no CSV file is generated.
Export = True

# File name automatically generated from the current date and time
ExportToFile = "experiment" + time.strftime("%Y-%m-%d-%H-%M-%S") + ".csv"
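# e.g. a run started at 10:30:05 on 2014-01-31 writes to
# "experiment2014-01-31-10-30-05.csv"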
# Tracks whether at least one experiment actually ran
Flag = False

# CSV header columns for the convergence values
CnvgHeader = []
for l in range(0, Iterations):
    CnvgHeader.append("Iter" + str(l + 1))
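# With Iterations=5 this produces:
#   ["Iter1", "Iter2", "Iter3", "Iter4", "Iter5"]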
mean_error = 0
total_feval = 0   # feval = number of objective-function evaluations
mean1 = 0         # mean of the best objective values across runs
var = 0           # variance of the best objective values across runs
sd = 0            # standard deviation
mean_feval = 0
succ_rate = 0
GlobalMins = numpy.zeros(NumOfRuns)  # best objective value found in each run
for i in range(0, len(optimizer)):
    for j in range(0, len(benchmarkfunc)):
        if optimizer[i] and benchmarkfunc[j]:  # run only if both an optimizer and an objective function are selected
            for k in range(0, NumOfRuns):
                func_details = benchmarks.getFunctionDetails(j)
                print("Run: {}".format(k + 1))  # to separate runs
                x, succ_rate, mean_feval = selector(i, func_details, PopulationSize, Iterations, succ_rate, mean_feval)
                mean_error = mean_error + x.error
                mean1 = mean1 + x.convergence[-1]
                total_feval = total_feval + x.feval
                GlobalMins[k] = x.convergence[-1]
                if Export:
                    with open(ExportToFile, 'a') as out:
                        writer = csv.writer(out, delimiter=',')
                        if not Flag:  # write the CSV header only once
                            header = numpy.concatenate([["Optimizer", "objfname", "startTime", "EndTime", "ExecutionTime"], CnvgHeader])
                            writer.writerow(header)
                        a = numpy.concatenate([[x.optimizer, x.objfname, x.startTime, x.endTime, x.executionTime], x.convergence])
                        writer.writerow(a)
                    # no explicit close needed: the with-statement closes the file
                    print("Results of run {} are saved in the CSV file.".format(k + 1))
                Flag = True  # at least one experiment has run
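# Each exported CSV row therefore has the layout:
#   Optimizer, objfname, startTime, EndTime, ExecutionTime, Iter1, ..., IterN
# where Iter1..IterN hold x.convergence, the per-iteration convergence values.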
mean1 = mean1 / NumOfRuns
mean_error = mean_error / NumOfRuns
if succ_rate > 0:
    mean_feval = mean_feval / succ_rate
total_feval = total_feval / NumOfRuns

for k in range(NumOfRuns):
    var = var + math.pow((GlobalMins[k] - mean1), 2)
var = var / NumOfRuns
sd = math.sqrt(var)
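# Equivalent numpy one-liners (population statistics; numpy defaults to ddof=0):
#   var = numpy.var(GlobalMins)
#   sd  = numpy.std(GlobalMins)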
print("Values after executing SMO: \n Mean Error:{} \n Mean Function eval:{} \n Total Function eval:{} \n Variance:{} \n STD:{}".format(mean_error, mean_feval, total_feval, var, sd))

if not Flag:  # failed to run even one experiment
    print("No optimizer or cost function is selected. Check the lists of available optimizers and cost functions.")