-
Notifications
You must be signed in to change notification settings - Fork 10
/
Copy pathhybrid.py
145 lines (116 loc) · 5.86 KB
/
hybrid.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Moto: Induction motor parameter estimation tool
Hybrid Algorithms
Author: Julius Susanto
Last edited: August 2014
"""
import numpy as np
import globals
from descent import nr_solver, dnr_solver, lm_solver
"""
HY_SOLVER - Hybrid algorithm solver for double cage model with core losses
Solves for 6 circuit parameters [Xs Xm Rr1 Xr1 Rr2 Rc]
Rs and Xr2 are computed by linear restrictions
Includes change of variables
Includes adaptive step size (as per Pedra 2008)
Includes determinant check of jacobian matrix
Usage: hy_solver(self, desc, p, pop, n_r, n_e, c_f, n_gen, err_tol)
Where p is a vector of motor performance parameters:
p = [sf eff pf Tb Tlr Ilr]
sf = full-load slip
eff = full-load efficiency
pf = full-load power factor
T_b = breakdown torque (as # of FL torque)
T_lr = locked rotor torque (as # of FL torque)
I_lr = locked rotor current
desc is the type of descent algorithm used - "NR", "LM", "DNR"
pop is the population each generation
n_r is the number of members retained for mating
n_e is the number of elite children
c_f is the crossover fraction
n_gen is the maximum number of generations
err_tol is the error tolerance for convergence
Returns: x is a vector of motor equivalent parameters:
x = [Rs Xs Xm Rr1 Xr1 Rr2 Xr2 Rc]
x(0) = Rs = stator resistance
x(1) = Xs = stator reactance
x(2) = Xm = magnetising reactance
x(3) = Rr1 = rotor / inner cage resistance
x(4) = Xr1 = rotor / inner cage reactance
x(5) = Rr2 = outer cage resistance
x(6) = Xr2 = outer cage reactance
x(7) = Rc = core resistance
iter is the number of iterations
err is the squared error of the objective function
conv is a true/false flag indicating convergence
"""
def hy_solver(self, desc, p, pop, n_r, n_e, c_f, n_gen, err_tol):
    """Hybrid (genetic algorithm + descent) solver for the double cage model
    with core losses.

    A genetic algorithm searches over initial estimates of [Rs, Xr2]; each
    candidate is refined by the selected descent algorithm until one member
    falls below the error tolerance or n_gen generations are exhausted.

    Args:
        desc: descent algorithm to use - "NR", "LM" or "DNR"
        p: motor performance parameters [sf eff pf Tb Tlr Ilr]
        pop: population size per generation
        n_r: number of members retained for mating
        n_e: number of elite children carried over unchanged
        c_f: crossover fraction of the non-elite children
        n_gen: maximum number of generations
        err_tol: squared-error tolerance for convergence

    Returns:
        (z, gen, err, conv) where z is the equivalent-circuit parameter
        vector [Rs Xs Xm Rr1 Xr1 Rr2 Xr2 Rc], gen is the generation at which
        the search stopped, err is the squared error of the objective
        function and conv is 1 on convergence, 0 otherwise.

    Raises:
        ValueError: if desc is not one of "NR", "LM", "DNR". (Previously an
        unknown desc left err at 0.0 and returned an all-zero "solution" as
        converged.)
    """
    def _run_descent(Rs, Xr2):
        # Dispatch one [Rs, Xr2] estimate to the chosen descent solver,
        # using the iteration/tolerance settings from the globals module.
        max_iter = globals.algo_data["max_iter"]
        conv_err = globals.algo_data["conv_err"]
        if desc == "NR":
            return nr_solver(p, 1, Rs, Xr2, max_iter, conv_err)
        if desc == "LM":
            return lm_solver(p, 1, Rs, Xr2, 1e-7, 5.0, max_iter, conv_err)
        if desc == "DNR":
            return dnr_solver(p, 1, Rs, Xr2, 1e-7, max_iter, conv_err)
        raise ValueError('Unknown descent algorithm "%s"' % desc)

    def _evaluate(gen):
        # Refine every population member; return (x, err) of the first
        # member that converges below err_tol, or None if none does.
        # Mutates x, n_iter and err in place so fitness ranking can reuse them.
        for i in range(pop):
            self.statusBar().showMessage('Calculating generation %d, member %d...' % (gen, i + 1))
            x[i, :], n_iter[i], err[i], _ = _run_descent(RX[i, 0], RX[i, 1])
            if err[i] < err_tol:
                return x[i, :], err[i]
        return None

    gen = 1
    sigma = 0.01  # std deviation of the gaussian mutation noise

    # Initial population of random [Rs, Xr2] estimates in [0, 0.15)
    RX = 0.15 * np.random.rand(pop, 2)
    x = np.zeros((pop, 8))
    n_iter = np.zeros(pop)  # renamed from 'iter' to avoid shadowing the builtin
    err = np.zeros(pop)

    # Check solution of initial population
    hit = _evaluate(gen)
    if hit is not None:
        z, best_err = hit
        return z, gen, best_err, 1

    # Run genetic algorithm
    for gen in range(2, n_gen + 1):
        # Rank current population by fitness (squared error, ascending)
        index = np.argsort(err)
        RX_mate = RX[index[0:n_r], :]  # mating pool: best n_r members

        RX_new = np.zeros((pop, 2))
        # Elite children: carry the best n_e members over unchanged
        RX_new[0:n_e, :] = RX[index[0:n_e], :]

        # Crossover children: per-gene weighted blend of two random parents.
        # randint replaces ceil(n_r * rand()) - 1, which could yield index -1
        # (the worst pool member) when rand() returned exactly 0.0.
        n_c = int(np.round((pop - n_e) * c_f))  # number of crossover children
        for j in range(n_c):
            pa, pb = np.random.randint(0, n_r, size=2)
            w = np.random.rand(2)  # random per-gene weighting
            RX_new[n_e + j, :] = w * RX_mate[pa, :] + (1 - w) * RX_mate[pb, :]

        # Mutation children: random parent plus gaussian noise, kept positive
        n_m = pop - n_e - n_c  # number of mutation children
        for k in range(n_m):
            parent = np.random.randint(0, n_r)
            RX_new[n_e + n_c + k, :] = np.abs(RX_mate[parent, :] + sigma * np.random.randn(2))

        RX = RX_new

        # Check solution of current generation
        hit = _evaluate(gen)
        if hit is not None:
            z, best_err = hit
            return z, gen, best_err, 1

    # No convergence within n_gen generations: return the best member found.
    # Placed after the loop so n_gen < 2 no longer falls off the end
    # returning None.
    index = np.argsort(err)
    return x[index[0], :], gen, err[index[0]], 0