demo_kuka.py
import numpy as np
import pybullet as p
import pybullet_data
from argparse import ArgumentParser
from planners import *
from utils import *
import utils
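
# Select the planning algorithm from the command line,
# e.g. `python demo_kuka.py --algo RRT` (default: BiRRT).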
parser = ArgumentParser()
parser.add_argument("--algo", choices=["RRT", "BiRRT"], default="BiRRT")
args = parser.parse_args()

if __name__ == "__main__":
    np.set_printoptions(precision=5, suppress=True)
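    # DISABLE_DRAWING is assumed to be a module-level flag in utils that
    # suppresses per-sample debug drawing while the planner runs.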
    utils.DISABLE_DRAWING = True

    # Initialize PyBullet
    physicsClient = p.connect(p.GUI)
    p.setGravity(0, 0, -9.8)
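    # SIM_DT is assumed to be the fixed simulation timestep defined in utils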
    p.setTimeStep(SIM_DT)
    p.configureDebugVisualizer(p.COV_ENABLE_GUI, 0)
    p.resetDebugVisualizerCamera(cameraDistance=1.3,
                                 cameraYaw=135,
                                 cameraPitch=-30,
                                 cameraTargetPosition=[0, 0., 0.3])

    # Load the ground plane and the robot
    p.setAdditionalSearchPath(pybullet_data.getDataPath())  # so plane.urdf and kuka_iiwa can be found
    plane_id = p.loadURDF("plane.urdf")
    start_pos = [0, 0, 0]
    start_euler = [0, 0, 0]
    start_orientation = p.getQuaternionFromEuler(start_euler)
    robot_id = p.loadURDF("kuka_iiwa/model.urdf", start_pos, start_orientation, useFixedBase=True)
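    # add_wall_segment is assumed to be a project helper that spawns a wall-shaped
    # obstacle; the arguments presumably control its placement and extent.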
    add_wall_segment(5.2, 5.2, 6)

    # Parse the actuated DoFs and their limits from the robot URDF
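    # parse_actuated_joints is assumed to return the per-joint (lower, upper)
    # limits and the indices of the movable joints.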
    joint_limits, actuated_joints = parse_actuated_joints(robot_id, return_limits=True)
    joint_polarities = np.ones(len(actuated_joints))
    joint_polarities[3] *= -1  #! make sure polarity of each joint is correct
    print(f"Actuated joint indices: {actuated_joints}")

    # Define the robot state space (one [lower, upper] row per joint)
    state_space = np.array(joint_limits).reshape((-1, 2))
    print("State space of the 7-DOF arm:\n", state_space)

    # Collision detection callback
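    # get_collision_fn is assumed to return a callable that maps a joint
    # configuration to True when the robot is in collision; the planner uses it
    # to validate sampled states and edges.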
    collision_fn = get_collision_fn(robot_id, actuated_joints)

    # Start and goal end-effector positions in [x, y, z]
    start_pos = [0.0, 0.5, 0.4]
    goal_pos = [0.5, 0.0, 0.4]
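    # Mark the start (black) and goal (green) positions in the GUI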
    p.addUserDebugPoints(pointPositions=[start_pos],
                         pointColorsRGB=[BLACK[:3]],
                         pointSize=15,
                         lifeTime=0)
    p.addUserDebugPoints(pointPositions=[goal_pos],
                         pointColorsRGB=[GREEN[:3]],
                         pointSize=15,
                         lifeTime=0)
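
    # IIK is assumed to be the project's iterative inverse kinematics solver;
    # calculate_ik presumably returns a joint configuration that reaches the given
    # end-effector position, starting from config_guess.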
    ik_solver = IIK(robot_id,
                    actuated_joints,
                    joint_limits,
                    joint_polarities,
                    alpha=0.001,  # 0.001 for visualization, 0.01 for fast convergence
                    beta=0.1)
    config_guess = np.zeros(len(actuated_joints))
    start_config = ik_solver.calculate_ik(start_pos, config_guess)
    goal_config = ik_solver.calculate_ik(goal_pos, config_guess)
    assert not collision_fn(start_config) and not collision_fn(goal_config), \
        "start or goal configuration is in collision"

    path = []

    # RRT parameters
    step_size = 0.02  # extension step length in configuration space
    goal_bias = 0.1   # probability of sampling the goal directly (typically 5%-10%)

    ### Run the planner
    print(f"Start path planning with {args.algo}...")
    algo_cls = {"RRT": RRT, "BiRRT": BiRRT}[args.algo]
    algo_container = algo_cls(state_space,
                              collision_fn,
                              goal_bias=goal_bias,
                              step_size=step_size)
    path = algo_container.plan_path(start_config, goal_config)
    ###

    # Execute the planned path
    set_joint_positions(robot_id, actuated_joints, start_config)
    print("Start executing planned path...")
    execute_trajectory(robot_id, actuated_joints, path, draw=True)

    input("Done! Press Enter to exit.")
    p.disconnect()