-
Notifications
You must be signed in to change notification settings - Fork 0
/
run_log.py
189 lines (153 loc) · 7.07 KB
/
run_log.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
# -*- coding:utf-8 -*-
import os
import time
import json
import numpy as np
import argparse
import sys
sys.path.append("./olympics_engine")
from env.chooseenv import make
from utils.get_logger import get_logger
from env.obs_interfaces.observation import obs_type
class NpEncoder(json.JSONEncoder):
    """JSON encoder that converts NumPy scalars and arrays to native Python types."""

    # (numpy abstract type, converter) pairs tried in order before falling back.
    _CONVERTERS = (
        (np.integer, int),
        (np.floating, float),
        (np.ndarray, lambda arr: arr.tolist()),
    )

    def default(self, obj):
        """Convert NumPy objects; defer anything else to the base encoder."""
        for np_type, convert in self._CONVERTERS:
            if isinstance(obj, np_type):
                return convert(obj)
        return super().default(obj)
def get_players_and_action_space_list(g):
    """Partition player ids by policy and collect each player's action space.

    Returns a pair (players_id, actions_space): for each policy index i,
    players_id[i] is the contiguous range of player ids that policy controls,
    and actions_space[i] lists the matching single-player action spaces.
    Raises Exception when the per-policy agent counts do not sum to n_player.
    """
    if sum(g.agent_nums) != g.n_player:
        raise Exception("agent number = %d 不正确,与n_player = %d 不匹配" % (sum(g.agent_nums), g.n_player))

    # Cumulative boundary after each policy's group of agents; players of
    # policy i occupy the half-open id interval [previous boundary, boundary).
    boundaries = []
    running_total = 0
    for count in g.agent_nums:
        running_total += count
        boundaries.append(running_total)

    # Assign consecutive player-id ranges per policy and gather their spaces.
    players_id = []
    actions_space = []
    start = 0
    for policy_i in range(len(g.obs_type)):
        end = boundaries[policy_i]
        id_range = range(start, end)
        players_id.append(id_range)
        actions_space.append([g.get_single_action_space(pid) for pid in id_range])
        start = end
    return players_id, actions_space
def get_joint_action_eval(game, multi_part_agent_ids, policy_list, actions_spaces, all_observes):
    """Query every policy's controller for its players' actions and assemble the joint action.

    Each policy i must already be bound to a module-level callable named "m<i>"
    (run_game binds them by exec-importing each agent's submission module).
    The controller is called as m_i(observation, action_space, is_act_continuous)
    and each returned action is validated via game.is_single_valid_action.

    Raises Exception when the number of policies does not match the game's agent
    groups, or when a policy declares an unsupported observation type.
    """
    if len(policy_list) != len(game.agent_nums):
        error = "模型个数%d与玩家个数%d维度不正确!" % (len(policy_list), len(game.agent_nums))
        raise Exception(error)
    # Joint action shape example: [[[0, 0, 0, 1]], [[0, 1, 0, 0]]]
    joint_action = []
    for policy_i in range(len(policy_list)):
        if game.obs_type[policy_i] not in obs_type:
            raise Exception("可选obs类型:%s" % str(obs_type))
        agents_id_list = multi_part_agent_ids[policy_i]
        action_space_list = actions_spaces[policy_i]
        # Resolve the controller by an explicit globals() lookup instead of
        # eval() on a constructed string: same resolution (module globals set
        # up by run_game's exec), but without the eval anti-pattern and with a
        # clear KeyError if the controller was never bound.
        controller = globals()['m%d' % policy_i]
        for i in range(len(agents_id_list)):
            agent_id = agents_id_list[i]
            a_obs = all_observes[agent_id]
            each = controller(a_obs, action_space_list[i], game.is_act_continuous)
            # Validate every single action against its declared space.
            game.is_single_valid_action(each, action_space_list[i], policy_i)
            joint_action.append(each)
    return joint_action
def set_seed(g, env_name):
    """For magent-family environments, reset the game and apply a freshly created seed.

    Environments outside the magent family are left untouched.
    """
    env_family = env_name.split("-")[0]
    if env_family == 'magent':
        g.reset()
        new_seed = g.create_seed()
        g.set_seed(new_seed)
def run_game(g, env_name, multi_part_agent_ids, actions_spaces, policy_list, render_mode):
    """
    This function is used to generate log for Vue rendering. Saves .json file
    """
    # Ensure the ./logs directory exists and set up the JSON logger.
    log_path = os.getcwd() + '/logs/'
    if not os.path.exists(log_path):
        os.mkdir(log_path)
    logger = get_logger(log_path, g.game_name, json_file=render_mode)
    set_seed(g, env_name)
    # Import each agent's file from here: bind agents/<policy>/submission.py's
    # my_controller into module globals as m0, m1, ... so that
    # get_joint_action_eval can look each controller up by policy index.
    for i in range(len(policy_list)):
        if policy_list[i] not in get_valid_agents():
            raise Exception("agent {} not valid!".format(policy_list[i]))
        file_path = os.path.dirname(os.path.abspath(__file__)) + "/agents/" + policy_list[i] + "/submission.py"
        if not os.path.exists(file_path):
            raise Exception("file {} not exist!".format(file_path))
        # Convert the last three path components into a dotted import path
        # (strip the trailing ".py").
        import_path = '.'.join(file_path.split('/')[-3:])[:-3]
        function_name = 'm%d' % i
        import_name = "my_controller"
        import_s = "from %s import %s as %s" % (import_path, import_name, function_name)
        # print(import_s)
        # Executed here: performs the dynamic import into module globals.
        exec(import_s, globals())
    st = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
    # Static metadata of the run; optional attributes fall back to None.
    game_info = {"game_name": env_name,
                 "n_player": g.n_player,
                 "board_height": g.board_height if hasattr(g, "board_height") else None,
                 "board_width": g.board_width if hasattr(g, "board_width") else None,
                 "init_info": g.init_info,
                 "start_time": st,
                 "mode": "terminal",
                 "seed": g.seed if hasattr(g, "seed") else None,
                 "map_size": g.map_size if hasattr(g, "map_size") else None}
    steps = []
    # Initialize the environment: fetch every agent's initial observation.
    all_observes = g.all_observes
    while not g.is_terminal():
        step = "step%d" % g.step_cnt
        if g.step_cnt % 10 == 0:
            # Progress heartbeat every 10 steps.
            print(step)
        if render_mode and hasattr(g, "env_core"):
            if hasattr(g.env_core, "render"):
                g.env_core.render()
        info_dict = {"time": time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))}
        # Per the original notes, an agent's action space here is two continuous
        # sub-spaces: heading and force — TODO confirm against the env spec.
        joint_act = get_joint_action_eval(g, multi_part_agent_ids, policy_list, actions_spaces, all_observes)
        # done flags game termination; info_before describes the agents' past
        # actions; info_after (per the original notes) carries agent_position,
        # agent_direction and agent_energy — positions/headings of the agents
        # and the ball — TODO confirm against the env implementation.
        all_observes, reward, done, info_before, info_after = g.step(joint_act)
        if env_name.split("-")[0] in ["magent"]:
            # magent encodes joint actions; decode them for the log.
            info_dict["joint_action"] = g.decode(joint_act)
        if info_before:
            info_dict["info_before"] = info_before
        info_dict["reward"] = reward
        if info_after:
            info_dict["info_after"] = info_after
        steps.append(info_dict)
    # Append the outcome summary and dump the whole record as one JSON line.
    game_info["steps"] = steps
    game_info["winner"] = g.check_win()
    game_info["winner_information"] = g.won
    game_info["n_return"] = g.n_return
    ed = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
    game_info["end_time"] = ed
    logs = json.dumps(game_info, ensure_ascii=False, cls=NpEncoder)
    logger.info(logs)
#读取agent文件夹下的具体agent目录
# Read the concrete agent directories under the agents/ folder.
def get_valid_agents():
    """Return the entries of the sibling 'agents' directory, excluding __pycache__."""
    agents_dir = os.path.join(os.path.dirname(__file__), 'agents')
    valid = []
    for entry in os.listdir(agents_dir):
        if entry == "__pycache__":
            continue
        valid.append(entry)
    return valid
if __name__ == "__main__":
    # Parse the two policy names first; both default to the random agent.
    parser = argparse.ArgumentParser()
    parser.add_argument("--my_ai", default="random", help="random")
    parser.add_argument("--opponent", default="random", help="random")
    args = parser.parse_args()

    env_type = "olympics-tablehockey"
    game = make(env_type, seed=None)
    render_mode = True

    # --opponent drives the first agent; --my_ai drives agent 2 (green agent).
    policy_list = [args.opponent, args.my_ai]

    multi_part_agent_ids, actions_space = get_players_and_action_space_list(game)
    run_game(game, env_type, multi_part_agent_ids, actions_space, policy_list, render_mode)