-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathNetworkEnv.py
More file actions
249 lines (221 loc) · 9.28 KB
/
NetworkEnv.py
File metadata and controls
249 lines (221 loc) · 9.28 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
import pandas as pd
import numpy as np
class NetworkEnv:
    """Gym-style environment for packet routing over a 5-node network.

    The observation is a (10, 5) array: column ``j`` is the FIFO queue of node
    ``j+1`` (10 slots), and each cell holds the destination node id (1-5) of a
    packet, or 0 for an empty slot.  One action per node is applied per step;
    delivering a packet to its destination removes it from the network.
    """
    # metadata = {'render.modes': ['human']}

    # Routing table reconstructed from the original per-node if/elif chains.
    # Topology (action -> next hop):
    #   N1: 1->N2, 2->N5            (0 and 3 re-queue on N1)
    #   N2: 1->N4, 2->N3, 3->N1     (0 re-queues on N2)
    #   N3: 1->N2, 2->N4, 3->N5    (0 re-queues on N3)
    #   N4: 1->N5, 2->N3, 3->N2    (0 re-queues on N4)
    #   N5: 1->N1, 2->N3, 3->N4    (0 re-queues on N5)
    # Each entry maps action -> (destination node id, reward charged when the
    # packet is NOT delivered); a destination of None means "push the packet
    # back onto this node's own queue" (no delivery check).
    # NOTE(review): the penalties are uneven (-10 vs -5 for the same kind of
    # move) exactly as in the original code — preserved verbatim; confirm
    # whether that asymmetry is intentional.
    _ROUTES = (
        {0: (None, -10), 3: (None, -10), 1: (2, -5), 2: (5, -5)},   # N1
        {0: (None, -10), 1: (4, -5), 2: (3, -5), 3: (1, -5)},       # N2
        {0: (None, -10), 1: (2, -5), 2: (4, -5), 3: (5, -5)},       # N3
        {0: (None, -5), 1: (5, -5), 2: (3, -5), 3: (2, -5)},        # N4
        {0: (None, -5), 1: (1, -5), 2: (3, -10), 3: (4, -10)},      # N5
    )
    _DELIVERED_REWARD = 20   # reward for landing a packet on its destination
    _QUEUE_SIZE = 10         # fixed per-node queue length

    def __init__(self, env_name):
        """Create an empty environment; call :meth:`reset` before stepping.

        Parameters
        ----------
        env_name : str
            Free-form label for this environment instance (not used in the
            dynamics — kept for API compatibility).
        """
        self.env_name = env_name
        # One fixed-size FIFO queue per node (``.T`` on the original 1-D
        # zeros was a no-op and has been dropped).
        self.Node_1Q = np.zeros(self._QUEUE_SIZE)
        self.Node_2Q = np.zeros(self._QUEUE_SIZE)
        self.Node_3Q = np.zeros(self._QUEUE_SIZE)
        self.Node_4Q = np.zeros(self._QUEUE_SIZE)
        self.Node_5Q = np.zeros(self._QUEUE_SIZE)
        self.state_observation = np.zeros([self._QUEUE_SIZE, 5])
        self.action = np.zeros(5)
        # Highest valid action id per node (action 0 = "do not route").
        self.N1_max_actions = 2
        self.N2_max_actions = 3
        self.N3_max_actions = 3
        self.N4_max_actions = 3
        self.N5_max_actions = 3
        self.done = False
        self.episode_length = 0

    def reset(self, df):
        """Load initial queues from *df* and return (observation, done, reward).

        Parameters
        ----------
        df : pandas.DataFrame
            Must contain columns "N1Q" .. "N5Q", each of length 10, giving
            the initial contents of the corresponding node queue.
        """
        self.Node_1Q = np.array(df.loc[:, "N1Q"])
        self.Node_2Q = np.array(df.loc[:, "N2Q"])
        self.Node_3Q = np.array(df.loc[:, "N3Q"])
        self.Node_4Q = np.array(df.loc[:, "N4Q"])
        self.Node_5Q = np.array(df.loc[:, "N5Q"])
        # Stack the five queues as columns: shape (10, 5).
        self.state_observation = np.array(
            [self.Node_1Q, self.Node_2Q, self.Node_3Q,
             self.Node_4Q, self.Node_5Q]).T
        self.reward = 0
        self.episode_length = 0
        self.done = False
        return self.state_observation, self.done, self.reward

    def random_action(self):
        """Sample one uniformly random action per node (0..max inclusive)."""
        bounds = (self.N1_max_actions, self.N2_max_actions,
                  self.N3_max_actions, self.N4_max_actions,
                  self.N5_max_actions)
        return np.array([np.random.randint(b + 1) for b in bounds])

    def step(self, action, observation):
        """Execute one time step within the environment.

        Parameters
        ----------
        action : sequence of 5 ints
            One routing action per node.
        observation : array-like
            Current observation; an all-zero observation (no packets left)
            terminates the episode without applying *action*.

        Returns
        -------
        tuple
            (state_observation, reward, done, episode_length).
        """
        self.done = False
        if np.all((observation == 0)):
            # Network drained: episode over; state and last reward unchanged.
            self.done = True
            return (self.state_observation, self.reward,
                    self.done, self.episode_length)
        self.action = action
        self.state_observation, self.reward = self.take_action(action)
        self.episode_length += 1
        return (self.state_observation, self.reward,
                self.done, self.episode_length)

    def random_step(self):
        """Apply a random action (printed for debugging) and return the result.

        NOTE(review): unlike :meth:`step`, this does not advance
        ``episode_length`` or check for termination — preserved as-is.
        """
        self.action = self.random_action()
        print(self.action)
        self.state_observation, self.reward = self.take_action(self.action)
        return (self.state_observation, self.reward,
                self.done, self.episode_length)

    def qcontrol(self, l, size, filler):
        """Force list *l* to exactly *size* items after each timestep.

        Truncates (returning a new list) when too long; pads in place with
        *filler* when too short; returns *l* itself otherwise.
        """
        if len(l) > size:
            return l[:size]
        l.extend([filler] * (size - len(l)))  # no-op when already full
        return l

    def take_action(self, action):
        """Apply one routing action per node; return (new_state, reward).

        The head packet of every queue is popped simultaneously, then each
        is re-queued, forwarded, delivered (+20), or dropped according to
        ``_ROUTES``; every queue is finally re-clamped to 10 slots.
        """
        # Head-of-queue packet for each node (row 0 of the observation).
        heads = self.state_observation[0].tolist()
        # Remaining queue contents (head removed) as mutable lists.
        queues = [self.Node_1Q[1:].tolist(), self.Node_2Q[1:].tolist(),
                  self.Node_3Q[1:].tolist(), self.Node_4Q[1:].tolist(),
                  self.Node_5Q[1:].tolist()]
        reward = 0
        for node, act in enumerate(action):
            packet = heads[node]
            if packet == 0:
                # Empty slot: doing nothing is free, any routing action is
                # penalised.
                reward += 0 if act == 0 else -10
                continue
            route = self._ROUTES[node].get(act)
            if route is None:
                # Action id outside this node's table: the packet is silently
                # dropped with no reward (original fall-through behavior).
                continue
            dest, penalty = route
            if dest is None:
                # "No routing": push the packet back onto this node's queue.
                queues[node].insert(0, packet)
                reward += penalty
            elif packet != dest:
                # Forwarded but not yet at its destination.
                queues[dest - 1].insert(0, packet)
                reward += penalty
            else:
                # Delivered: packet leaves the network.
                reward += self._DELIVERED_REWARD
        # Keep every queue at the fixed size required by the observation
        # space definition.
        queues = [self.qcontrol(q, self._QUEUE_SIZE, 0) for q in queues]
        (self.Node_1Q, self.Node_2Q, self.Node_3Q,
         self.Node_4Q, self.Node_5Q) = (np.array(q) for q in queues)
        new_state = np.array(queues).T  # reassemble (10, 5) observation
        return new_state, reward
if __name__ == "__main__":
    # Bug fix: `env_name` was previously undefined (its assignment was
    # commented out), so this raised NameError. Define it before use.
    env_name = "NetworkEnv"
    NetworkEnv(env_name)
    #agent.test('Models/PongDeterministic-v4_PG_2.5e-05.h5')
    #agent.test('Models/Pong-v0_PG_2.5e-05.h5')