#!/usr/bin/env python3
'''
*****************************************************************************************
*
*        ===============================================
*           Cosmo Logistic (CL) Theme (eYRC 2023-24)
*        ===============================================
*
*  This script should be used to implement Task 1A of Cosmo Logistic (CL) Theme (eYRC 2023-24).
*
*  This software is made available on an "AS IS WHERE IS BASIS".
*  Licensee/end user indemnifies and will keep e-Yantra indemnified from
*  any and all claim(s) that emanate from the use of the Software or
*  breach of the terms of this agreement.
*
*****************************************************************************************
'''

# Team ID:          [ CL#2414 ]
# Author List:      [ Aditya Wadgaonkar, Harikishanthini K, Sujan Adiga, Tanay Srivastava ]
# Filename:         task1a.py
# Functions:        calculate_rectangle_area, aruco_display, detect_aruco, main
# Nodes:            aruco_tf_publisher
#                   Publishing Topics  - [ /tf ]
#                   Subscribing Topics - [ /camera/color/image_raw, /camera/aligned_depth_to_color/image_raw ]

################### IMPORT MODULES #######################
import rclpy
import sys
import cv2
import math
import tf2_ros
import tf_transformations
import tf2_geometry_msgs
from rclpy.time import Time
import numpy as np
from rclpy.node import Node
from cv_bridge import CvBridge, CvBridgeError
from geometry_msgs.msg import TransformStamped
from scipy.spatial.transform import Rotation as R
from sensor_msgs.msg import CompressedImage, Image

##################### FUNCTION DEFINITIONS #######################

ARUCO_DICT = {
    "DICT_4X4_50": cv2.aruco.DICT_4X4_50,
    "DICT_4X4_100": cv2.aruco.DICT_4X4_100,
    "DICT_4X4_250": cv2.aruco.DICT_4X4_250,
    "DICT_4X4_1000": cv2.aruco.DICT_4X4_1000,
    "DICT_5X5_50": cv2.aruco.DICT_5X5_50,
    "DICT_5X5_100": cv2.aruco.DICT_5X5_100,
    "DICT_5X5_250": cv2.aruco.DICT_5X5_250,
    "DICT_5X5_1000": cv2.aruco.DICT_5X5_1000,
    "DICT_6X6_50": cv2.aruco.DICT_6X6_50,
    "DICT_6X6_100": cv2.aruco.DICT_6X6_100,
    "DICT_6X6_250": cv2.aruco.DICT_6X6_250,
    "DICT_6X6_1000": cv2.aruco.DICT_6X6_1000,
    "DICT_7X7_50": cv2.aruco.DICT_7X7_50,
    "DICT_7X7_100": cv2.aruco.DICT_7X7_100,
    "DICT_7X7_250": cv2.aruco.DICT_7X7_250,
    "DICT_7X7_1000": cv2.aruco.DICT_7X7_1000,
    "DICT_ARUCO_ORIGINAL": cv2.aruco.DICT_ARUCO_ORIGINAL,
    "DICT_APRILTAG_16h5": cv2.aruco.DICT_APRILTAG_16h5,
    "DICT_APRILTAG_25h9": cv2.aruco.DICT_APRILTAG_25h9,
    "DICT_APRILTAG_36h10": cv2.aruco.DICT_APRILTAG_36h10,
    "DICT_APRILTAG_36h11": cv2.aruco.DICT_APRILTAG_36h11
}
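
# --- Hedged compatibility sketch (an illustrative addition, not part of the task
# template): the free function cv2.aruco.detectMarkers used in detect_aruco()
# below was removed in OpenCV >= 4.7, where detection moved to the
# cv2.aruco.ArucoDetector class. Assuming an OpenCV 4.x install, a helper like
# this returns a detect function on either API:
def make_marker_detector(aruco_type="DICT_4X4_50"):
    dictionary = cv2.aruco.getPredefinedDictionary(ARUCO_DICT[aruco_type])
    if hasattr(cv2.aruco, "ArucoDetector"):                 # OpenCV >= 4.7
        params = cv2.aruco.DetectorParameters()
        detector = cv2.aruco.ArucoDetector(dictionary, params)
        return detector.detectMarkers                       # fn(gray) -> (corners, ids, rejected)
    params = cv2.aruco.DetectorParameters_create()          # OpenCV <= 4.6
    return lambda gray: cv2.aruco.detectMarkers(gray, dictionary, parameters=params)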

def calculate_rectangle_area(coordinates):
    '''
    Description:    Function to calculate the area of a detected aruco marker

    Args:
        coordinates (list):     coordinates of the detected aruco (4 sets of (x,y) coordinates)

    Returns:
        area        (float):    area of the detected aruco
        width       (float):    width of the detected aruco
    '''

    ############ Function VARIABLES ############
    area = None
    width = None

    ############ ADD YOUR CODE HERE ############

    # INSTRUCTIONS & HELP :
    #   ->  Receive coordinates from 'detectMarkers' of the cv2.aruco library
    #       and use them to calculate the area and width of the detected aruco.
    #   ->  Extract values from the input set of 4 (x,y) coordinates
    #       and formulate the width and height of the detected aruco to return 'area' and 'width'.

    ############################################

    # Extract the four corner points passed in from the caller
    [[[x1, y1], [x2, y2], [x3, y3], [x4, y4]]] = coordinates

    # Use the Shoelace formula to calculate the area
    area = 0.5 * abs((x1*y2 + x2*y3 + x3*y4 + x4*y1) - (y1*x2 + y2*x3 + y3*x4 + y4*x1))

    # Diagonal lengths of the detected quadrilateral
    diagonal1 = math.sqrt((x1 - x3)**2 + (y1 - y3)**2)
    diagonal2 = math.sqrt((x2 - x4)**2 + (y2 - y4)**2)

    # Width estimate: mean of the two diagonals. (A strict equality check between
    # the diagonals would leave width as None for any perspective-distorted marker.)
    width = (diagonal1 + diagonal2) / 2.0

    return area, width
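
# Hedged sanity check for the geometry above (an illustrative helper, never called
# by the node): for an axis-aligned unit square the shoelace area is 1.0 and both
# diagonals equal sqrt(2), so the mean-diagonal width estimate is sqrt(2) too.
def _self_test_rectangle_area():
    area, width = calculate_rectangle_area(np.array([[[0, 0], [1, 0], [1, 1], [0, 1]]]))
    assert abs(area - 1.0) < 1e-9
    assert abs(width - math.sqrt(2)) < 1e-9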

def aruco_display(corners, ids, rejected, image):
    '''
    Description:    Helper to draw detected marker outlines and ID labels on the image frame
    '''
    if len(corners) > 0:
        ids = ids.flatten()
        for (markerCorner, markerID) in zip(corners, ids):
            # reshape the marker corners into four (x, y) points
            pts = markerCorner.reshape((4, 2))
            (topLeft, topRight, bottomRight, bottomLeft) = pts
            topRight = (int(topRight[0]), int(topRight[1]))
            bottomRight = (int(bottomRight[0]), int(bottomRight[1]))
            bottomLeft = (int(bottomLeft[0]), int(bottomLeft[1]))
            topLeft = (int(topLeft[0]), int(topLeft[1]))

            # draw the marker boundary
            cv2.line(image, topLeft, topRight, (0, 255, 0), 2)
            cv2.line(image, topRight, bottomRight, (0, 255, 0), 2)
            cv2.line(image, bottomRight, bottomLeft, (0, 255, 0), 2)
            cv2.line(image, bottomLeft, topLeft, (0, 255, 0), 2)

            # label the marker with its ID near the centre point
            cX = int((topLeft[0] + bottomRight[0]) / 2.0)
            cY = int((topLeft[1] + bottomRight[1]) / 2.0)
            cv2.putText(image, "id=" + str(markerID), (cX + 5, cY - 15),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (250, 0, 0), 2)
    return image

def detect_aruco(image):
    '''
    Description:    Function to perform aruco detection and return the details of each detected marker,
                    such as marker ID, distance, angle, width, center point location, etc.

    Args:
        image                   (Image):    Input image frame received from the respective camera topic

    Returns:
        center_aruco_list       (list):     Center points of all aruco markers detected
        distance_from_rgb_list  (list):     Distance of each detected aruco marker from the RGB camera
        angle_aruco_list        (list):     Angle of the pose estimated for each aruco marker
        width_aruco_list        (list):     Width of all detected aruco markers
        ids                     (list):     List of all aruco marker IDs detected in a single frame
    '''

    ############ Function VARIABLES ############

    # Guard against frames that have not arrived yet
    if image is None:
        return [], [], [], [], []

    # Threshold (in square pixels) used to ignore markers/boxes placed beyond the arm's reach
    aruco_area_threshold = 1500

    # The camera matrix is defined as per the camera info loaded from the plugin used.
    # You may get this from the /camera_info topic when the camera is spawned in gazebo.
    # Make sure you verify this matrix once if there are calibration issues.
    cam_mat = np.array([[931.1829833984375, 0.0, 640.0],
                        [0.0, 931.1829833984375, 360.0],
                        [0.0, 0.0, 1.0]])

    # The distortion matrix is currently all zeros.
    # It will be used during Stage 2 hardware, as the Intel Realsense camera provides these values.
    dist_mat = np.array([0.0, 0.0, 0.0, 0.0, 0.0])

    # Marker side length: 0.15 m (150 mm)
    size_of_aruco_m = 0.15

    center_aruco_list = []
    distance_from_rgb_list = []
    angle_aruco_list = []
    width_aruco_list = []
    ids = []

    ############ ADD YOUR CODE HERE ############

    # INSTRUCTIONS & HELP :

    #   ->  Convert the input BGR image to GRAYSCALE for aruco detection
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    cv2.imshow("gray_image", gray_image)
    cv2.waitKey(100)

    #   ->  Aruco parameters used: dictionary 4x4_50 (4x4 only until 50 aruco IDs)
    aruco_type = "DICT_4X4_50"
    aruco_dict = cv2.aruco.getPredefinedDictionary(ARUCO_DICT[aruco_type])
    aruco_params = cv2.aruco.DetectorParameters()

    #   ->  Detect aruco markers in the image and store 'corners' and 'ids'
    (corners, ids, rejected) = cv2.aruco.detectMarkers(gray_image, aruco_dict, parameters=aruco_params)

    #   ->  Loop over each marker detected in the frame, calculate its area using
    #       calculate_rectangle_area(coordinates) defined above, and skip markers
    #       that are too small, i.e. placed beyond the arm's reach
    if ids is not None and len(ids) > 0:
        for i, marker_id in enumerate(ids):
            area, _ = calculate_rectangle_area(corners[i])

            if area > aruco_area_threshold:
                # Draw the detected marker and its ID on the image frame shown later
                cv2.aruco.drawDetectedMarkers(image, corners)
                image_1 = aruco_display(corners, ids, rejected, image)

                #   ->  Estimate the pose of the detected marker; the center point is the
                #       mean of its four corners (numpy), and the distance comes from
                #       'estimatePoseSingleMarkers' of the cv2.aruco library
                rvec, tvec, _ = cv2.aruco.estimatePoseSingleMarkers(corners[i], size_of_aruco_m, cam_mat, dist_mat)

                # Center point of the marker
                center = np.mean(corners[i][0], axis=0)

                # Distance from the RGB camera: the Z (optical-axis) component of tvec
                distance = tvec[0][0][2]

                # Yaw angle of the estimated pose
                angle = rvec[0][0][2]

                # Width of the marker along its top edge (in pixels)
                width = np.abs(corners[i][0][0][0] - corners[i][0][1][0])

                # Draw the frame axes and mark the centre point for visual verification
                cv2.drawFrameAxes(image_1, cam_mat, dist_mat, rvec, tvec, 1)
                c_x = round(center[0])
                c_y = round(center[1])
                cv2.circle(image_1, (c_x, c_y), 5, (0, 0, 255), -1)
                cv2.imshow('Detected Markers', image_1)
                cv2.waitKey(100)

                print("data for arucoId: ", ids[i])
                print("x coordinate of centre: ", c_x)
                print("y coordinate of centre: ", c_y)
                print("angle is: ", angle)

                # Append the details of this marker to the respective lists
                center_aruco_list.append(center)
                distance_from_rgb_list.append(distance)
                angle_aruco_list.append(angle)
                width_aruco_list.append(width)

    ############################################

    return center_aruco_list, distance_from_rgb_list, angle_aruco_list, width_aruco_list, ids
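
# Hedged standalone usage sketch (assumes a local test image 'sample.png' containing
# 4x4_50 markers; illustrative only, never invoked by the node):
def _debug_detect_from_file(path='sample.png'):
    img = cv2.imread(path)      # returns None if the file is missing,
    return detect_aruco(img)    # which detect_aruco() handles by returning empty lists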

##################### CLASS DEFINITION #######################

class aruco_tf(Node):
    '''
    ___CLASS___

    Description:    Class which serves the purpose of defining the process for detecting
                    aruco markers and publishing a tf on the estimated pose.
    '''

    def __init__(self):
        '''
        Description:    Initialization of the aruco_tf class.
                        All classes have a function called __init__(), which is always executed when the class is being initiated.
                        The __init__() function is called automatically every time the class is used to create a new object.
                        You can find more on this topic here -> https://www.w3schools.com/python/python_classes.asp
        '''
        super().__init__('aruco_tf_publisher')                                      # registering node

        ############ Topic SUBSCRIPTIONS ############
        self.color_cam_sub = self.create_subscription(Image, '/camera/color/image_raw', self.colorimagecb, 10)
        self.depth_cam_sub = self.create_subscription(Image, '/camera/aligned_depth_to_color/image_raw', self.depthimagecb, 10)

        ############ Constructor VARIABLES/OBJECTS ############
        image_processing_rate = 0.2                                                 # rate at which to process images (seconds)
        self.bridge = CvBridge()                                                    # initialise CvBridge object for image conversion
        self.tf_buffer = tf2_ros.buffer.Buffer()                                    # buffer used for listening to transforms
        self.listener = tf2_ros.TransformListener(self.tf_buffer, self)
        self.br = tf2_ros.TransformBroadcaster(self)                                # transform broadcaster to send transforms w.r.t. some frame_id
        self.timer = self.create_timer(image_processing_rate, self.process_image)   # timer callback, fired every 0.2 seconds (as set by 'image_processing_rate')

        self.cv_image = None                                                        # colour raw image variable (from colorimagecb())
        self.depth_image = None                                                     # depth image variable (from depthimagecb())

    def depthimagecb(self, data):
        '''
        Description:    Callback function for the aligned depth camera topic.
                        Receives depth image data and converts it to a CV2 image.

        Args:
            data (Image):    Input depth image frame received from the aligned depth camera topic
        '''

        ############ ADD YOUR CODE HERE ############

        # INSTRUCTIONS & HELP :
        #   ->  Use the data variable to convert the ROS Image message to a CV2 image
        #   ->  HINT: You may use CvBridge to do the same
        try:
            cv2_depth_image = self.bridge.imgmsg_to_cv2(data, desired_encoding="passthrough")
            cv2.imshow("Depth Image", cv2_depth_image)
            cv2.waitKey(100)
        except Exception as e:
            # keep the previous frame if the conversion fails
            print(f"Error converting depth image: {e}")
            return

        self.depth_image = cv2_depth_image

        ############################################

    def colorimagecb(self, data):
        '''
        Description:    Callback function for the colour camera raw topic.
                        Receives raw image data and converts it to a CV2 image.

        Args:
            data (Image):    Input coloured raw image frame received from the image_raw camera topic
        '''

        ############ ADD YOUR CODE HERE ############

        # INSTRUCTIONS & HELP :
        #   ->  Use the data variable to convert the ROS Image message to a CV2 image
        #   ->  HINT: You may use CvBridge to do the same
        #   ->  Check whether the image needs any rotation or flipping, as the input data
        #       may differ from what you expect; cv2 functions such as 'flip' and 'rotate' can help
        try:
            cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
            cv2.imshow("Color Image", cv_image)
            cv2.waitKey(100)
        except Exception as e:
            # keep the previous frame if the conversion fails
            print("Error converting ROS Image to CV2: %s" % str(e))
            return

        self.cv_image = cv_image

        ############################################

    def process_image(self):
        '''
        Description:    Timer callback used to detect aruco markers and publish tf on the estimated poses.
        '''

        ############ Function VARIABLES ############

        # These variables are taken from the camera info topic: image pixel size, focalX, focalY, etc.
        # Make sure you verify these values once, as they directly affect the result.
        # You can find more on these variables here -> http://docs.ros.org/en/melodic/api/sensor_msgs/html/msg/CameraInfo.html
        sizeCamX = 1280
        sizeCamY = 720
        centerCamX = 640
        centerCamY = 360
        focalX = 931.1829833984375
        focalY = 931.1829833984375

        ############ ADD YOUR CODE HERE ############

        # INSTRUCTIONS & HELP :

        #   ->  Get the aruco center, distance from rgb, angle, width and ids lists
        #       from detect_aruco() defined above
        center_aruco_list, distance_from_rgb_list, angle_aruco_list, width_aruco_list, ids = detect_aruco(self.cv_image)

        # The depth lookup below needs a depth frame; wait until one has arrived
        if self.depth_image is None:
            return

        #   ->  Loop over the detected marker ids to calculate the position and
        #       orientation transforms to publish as TF (capped at the first three markers)
        if ids is not None:
            for i in range(min(len(angle_aruco_list), 3)):
                marker_id = int(ids[i][0])

                #   ->  Correct the aruco angle obtained from the cv2 aruco function
                #       'estimatePoseSingleMarkers' with this correction formula:
                #           angle_aruco = (0.788*angle_aruco) - ((angle_aruco**2)/3160)
                angle = angle_aruco_list[i]
                corrected_angle = (0.788 * angle) - ((angle ** 2) / 3160)

                #   ->  Calculate quaternions from roll, pitch, yaw
                #       (roll and pitch are 0, yaw is the corrected aruco angle)
                quaternion = tf_transformations.quaternion_from_euler(0, 0, corrected_angle)

                #   ->  Use center_aruco_list to read the realsense depth at the marker
                #       center (divide by 1000 to convert mm to m)
                distance_from_rgb = self.depth_image[int(center_aruco_list[i][1])][int(center_aruco_list[i][0])] / 1000
                print("centre pixel row (cY): ", center_aruco_list[i][1])
                print("centre pixel col (cX): ", center_aruco_list[i][0])

                #   ->  Rectify x, y, z based on the focal length, center value and image size:
                #           x = distance_from_rgb * (sizeCamX - cX - centerCamX) / focalX
                #           y = distance_from_rgb * (sizeCamY - cY - centerCamY) / focalY
                #           z = distance_from_rgb
                #       where cX and cY come from 'center_aruco_list', distance_from_rgb is the
                #       depth read in the previous step, and sizeCamX, sizeCamY, centerCamX,
                #       centerCamY, focalX and focalY are defined above
                x = distance_from_rgb * (sizeCamX - center_aruco_list[i][0] - centerCamX) / focalX
                y = distance_from_rgb * (sizeCamY - center_aruco_list[i][1] - centerCamY) / focalY
                z = distance_from_rgb

                # Rotate the (x, z) pair by +90 degrees and the (y, z) pair by -90 degrees
                # to move from the camera's optical axes to the camera_link axes convention
                rad = np.deg2rad(90)
                print(f'x before rotation: {x}, z before rotation: {z}')
                rotationMatrix_1 = np.array([[np.cos(rad), -np.sin(rad)], [np.sin(rad), np.cos(rad)]])
                x, z = np.dot(np.array([x, z]), rotationMatrix_1)
                rotationMatrix_2 = np.array([[np.cos(-rad), -np.sin(-rad)], [np.sin(-rad), np.cos(-rad)]])
                y, z = np.dot(np.array([y, z]), rotationMatrix_2)
                print(f'x after rotation: {x}, z after rotation: {z}')

                print("value of i is : ", i)
                print("Distance from RGB: ", distance_from_rgb_list)
                print("value of x is :", x)
                print("value of y is :", y)
                print("value of z is :", z)

                #   ->  The coordinates received so far run from camera_link to the aruco
                #       marker center, so publish this transform w.r.t. camera_link using a
                #       Geometry Message - TransformStamped; its position w.r.t. base_link is
                #       collected in the next step. Frames used:
                #           frame_id = 'camera_link'
                #           child_frame_id = 'cam_<marker_id>'    Ex: cam_20, where 20 is the aruco marker ID
                tf_msg = TransformStamped()
                tf_msg.header.stamp = self.get_clock().now().to_msg()
                tf_msg.header.frame_id = 'camera_link'
                tf_msg.child_frame_id = 'cam_' + str(marker_id)
                tf_msg.transform.translation.x = x
                tf_msg.transform.translation.y = y
                tf_msg.transform.translation.z = z
                # remap the quaternion components from the optical frame to the camera_link axes
                tf_msg.transform.rotation.x = quaternion[2]
                tf_msg.transform.rotation.y = -quaternion[0]
                tf_msg.transform.rotation.z = quaternion[1]
                tf_msg.transform.rotation.w = quaternion[3]
                self.br.sendTransform(tf_msg)

                #   ->  Look up the transform between base_link and the marker's cam_<marker_id>
                #       frame, and republish it as obj_<marker_id> w.r.t. base_link
                try:
                    base_to_obj = self.tf_buffer.lookup_transform('base_link', 'cam_' + str(marker_id), Time())

                    obj_tf_msg = TransformStamped()
                    obj_tf_msg.header.stamp = self.get_clock().now().to_msg()
                    obj_tf_msg.header.frame_id = 'base_link'
                    obj_tf_msg.child_frame_id = 'obj_' + str(marker_id)
                    obj_tf_msg.transform = base_to_obj.transform
                    self.br.sendTransform(obj_tf_msg)
                except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException) as e:
                    self.get_logger().error(f'Error looking up transform: {e}')

        #   ->  detect_aruco() above already shows the cv2 image window with the detected
        #       markers drawn and center points located; refer to the MD book on the portal
        #       for a sample image -> https://portal.e-yantra.org/
        #   ->  NOTE: The Z axis of the TF should point inside the box (the purpose of this
        #       will be known in Task 1B). The auto eval script judges the angular difference
        #       as well, so make sure the Z axis points inside the box (refer to the sample
        #       images on the Portal - MD book).

        ############################################

##################### FUNCTION DEFINITION #######################

def main():
    '''
    Description:    Main function which creates a ROS node and spins it for the aruco_tf class to perform its task
    '''
    rclpy.init(args=sys.argv)                                           # initialisation
    aruco_tf_class = aruco_tf()                                         # creating a new object for class 'aruco_tf'
    aruco_tf_class.get_logger().info('Node created: Aruco tf process')  # logging information
    rclpy.spin(aruco_tf_class)                                          # spinning on the object to keep it alive in ROS 2 DDS
    aruco_tf_class.destroy_node()                                       # destroy node after spin ends
    rclpy.shutdown()                                                    # shutdown process
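
# Typical invocation (hedged sketch; the package name is a placeholder for your own
# workspace layout):
#   ros2 run <your_package> task1a.py
# The published frames can then be verified with the standard tf2 echo tool:
#   ros2 run tf2_ros tf2_echo base_link obj_<marker_id>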

if __name__ == '__main__':
    '''
    Description:    If the python interpreter is running this module (the source file) as the main program,
                    it sets the special __name__ variable to the value "__main__".
                    If this file is being imported from another module, __name__ will be set to that module's name.
                    You can find more on this here -> https://www.geeksforgeeks.org/what-does-the-if-__name__-__main__-do/
    '''
    main()