q_learning.py
"""--------------------------------"""
""" Initialisation de Flappy Bird """
"""--------------------------------"""
from ple.games.flappybird import FlappyBird
from ple import PLE
# Définition des actions
actions = [None, 119]
game = FlappyBird(graphics="fixed")
p = PLE(game, fps=30, frame_skip=1, num_steps=1, force_fps=True, display_screen=True)
p.init()
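
# Note: 119 is pygame's K_w key code, which PLE's FlappyBird binds to the
# "flap" action, while acting with None simply advances one frame. The valid
# key codes can also be queried with p.getActionSet() instead of hard-coding.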
"""----------------------------"""
""" Création du Deep Q-Network """
"""----------------------------"""
from keras.models import Sequential
from keras.layers import Dense
from keras import optimizers
dqn = Sequential()
# 1st layer
dqn.add(Dense(units=500, kernel_initializer='lecun_uniform', activation="relu", input_dim = 8))
# output layer
dqn.add(Dense(units=2, kernel_initializer='lecun_uniform', activation="linear"))
dqn.compile(loss='mse', optimizer=optimizers.Adam(1e-4))
dqn.load_weights("dqn_2_1.dqf")
#dqn.load_weights("dqn_0_3.dqf")
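
# The network is a function approximator for Q(s, a): it maps the 8 state
# features to one Q-value per action. Through the MSE loss, training (in the
# loop further down) regresses towards the one-step temporal-difference target
#     Q(s, a) <- r + gamma * max_a' Q(s', a')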
"""-----------------------------------------"""
""" Définition de quelques fonctions utiles """
"""-----------------------------------------"""
import numpy as np
def process_state(state):
""" Renvoie l'état sous forme de liste """
return [state['player_y'], state['player_vel'],
state['next_pipe_dist_to_player'], state['next_pipe_top_y'], state['next_pipe_bottom_y'],
state['next_next_pipe_dist_to_player'], state['next_next_pipe_top_y'], state['next_next_pipe_bottom_y']]
def epsilon(step):
    """ Exploration rate used for epsilon-greedy action selection. """
    #if step < 1e6:
    #    return 1. - step * 9e-7
    #return .1
    return 0.01
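
# With a constant epsilon of 0.01 the agent explores on about 1% of frames.
# The commented-out schedule above is the DQN-style alternative: anneal
# epsilon linearly from 1.0 down to 0.1 over the first 1e6 steps
# (1. - 1e6 * 9e-7 = 0.1), then keep it at 0.1.
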
def clip_reward(r):
    """ Reshape the raw reward: +1 for any gain, -1000 for dying. """
    rr = 0
    if r > 0:
        rr = 1
    if r < 0:
        rr = -1000
    return rr
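
# Asymmetric clipping: every positive reward (a pipe passed) counts as +1,
# while dying costs -1000, so the TD targets penalise a crash far more than
# they reward progress. (The original DQN paper clips rewards to [-1, 1].)
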
def greedy_action(network, state_x):
    """ Return the index (into `actions`) of the highest predicted Q-value. """
    Q = network.predict(np.array(state_x).reshape(1, len(state_x)), batch_size=batchSize)
    return np.argmax(Q)

def MCeval(network, games, gamma):
    """ Evaluate the network: play `games` full games greedily, return the mean score. """
    scores = np.zeros(games)
    for i in range(games):
        p.reset_game()
        state_x = process_state(game.getGameState())
        while not game.game_over():
            action = greedy_action(network, state_x)
            reward = p.act(actions[action])
            state_y = process_state(game.getGameState())
            scores[i] = scores[i] + reward
            state_x = state_y
    return np.mean(scores)
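
# Note that MCeval averages the raw, unclipped game scores of the purely
# greedy policy, and that its `gamma` argument is accepted but unused: the
# score is an undiscounted sum of rewards.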
"""----------------------"""
""" Apprentissage du DQN """
"""----------------------"""
# Variables utiles
total_games = 10000
gamma = 0.99
step = 0
batchSize = 256
# Définition des évaluations
evaluation_period = 300
nb_epochs = total_games // evaluation_period
epoch=-1
scoreMC = np.zeros((nb_epochs))
# Enregistrement du réseau de neurones
filename = "dqn_3_"
"""-----------------"""
""" Deep Q-Learning """
"""-----------------"""
for id_game in range(total_games):
    # Periodic evaluation and checkpointing
    if id_game % evaluation_period == 0:
        epoch += 1
        scoreMC[epoch] = MCeval(dqn, 50, gamma)
        dqn.save(filename + str(epoch) + ".dqf")
        print(">>> Eval #%d | score = %f" % (epoch, scoreMC[epoch]))
    p.reset_game()  # Start a new game
    state_x = process_state(game.getGameState())
    id_frame = 0
    score = 0
    alea = 0
    while not game.game_over():
        id_frame += 1
        step += 1
        ## Choose the action to perform: 0 or 1
        if np.random.rand() < epsilon(step):  # Random action (exploration)
            alea += 1
            action = np.random.choice([0, 1])
        else:                                 # Best known action (exploitation)
            action = greedy_action(dqn, state_x)
        ## Play the action, observe the reward and the next state
        reward = p.act(actions[action])
        reward = clip_reward(reward)
        state_y = process_state(game.getGameState())
        ## Update Q: move Q(state_x, action) towards the TD target
        QX = dqn.predict(np.array(state_x).reshape(1, len(state_x)), batch_size=batchSize)
        y = np.zeros(2)
        y[:] = QX[0, :]
        if not game.game_over():
            score += reward
            QY = dqn.predict(np.array(state_y).reshape(1, len(state_y)), batch_size=batchSize)
            QYmax = np.max(QY)
            update = reward + gamma * QYmax
        else:  # Terminal state: no bootstrap term
            update = reward
        y[action] = update
        dqn.fit(np.array(state_x).reshape(1, len(state_x)), np.array(y).reshape(1, len(y)),
                epochs=3, verbose=0)
        state_x = state_y
    print(">>> game #%d | score = %d | n_steps = %d | random actions = %.2f%%"
          % (id_game, score, id_frame, alea / id_frame * 100))
# Final recap of all the evaluation scores
for i in range(nb_epochs):
    print("epoch #%d | score = %f" % (i, scoreMC[i]))