12-05-2019, 09:16 AM
Hi there,
I have the Python code below and it works perfectly if I run it outside RoboDK. If I import it into RoboDK and run it from there, it returns an error (attached image) because of the libraries that are not part of RoboDK. Do you know how I can solve this?
Code:
import cv2                       # state of the art computer vision algorithms library
import numpy as np               # fundamental package for scientific computing
import imageio
import matplotlib.pyplot as plt  # 2D plotting library producing publication quality figures
import pyrealsense2 as rs
import win32gui
import win32api
import time
import pyautogui
from robolink import * # API to communicate with RoboDK
from robodk import * # basic matrix operations
# Any interaction with RoboDK must be done through a Robolink object
RL = Robolink()
points = rs.points()
pipe = rs.pipeline()
config = rs.config()
profile = pipe.start(config)
colorizer = rs.colorizer()
align = rs.align(rs.stream.color)
state_left = win32api.GetKeyState(0x01) # Left button up = 0 or 1. Button down = -127 or -128
device = profile.get_device() # type: rs.device
depth_sensor = device.first_depth_sensor() # type: rs.depth_sensor
if depth_sensor.supports(rs.option.depth_units):
    depth_sensor.set_option(rs.option.depth_units, 0.0001)
robot = RL.Item('UR5e')
frame = RL.Item('Reference Frame')
Target = RL.AddTarget('Target 2', frame)
try:
    while True:
        frames = pipe.wait_for_frames()
        frames = align.process(frames)
        depth_frame = frames.get_depth_frame()
        color_frame = frames.get_color_frame()
        if not depth_frame: continue
        #print(width, height)
        depth = np.asanyarray(colorizer.colorize(depth_frame).get_data())
        color = np.asanyarray(color_frame.get_data())
        images = np.hstack((color, depth))
        #cv2.namedWindow('RGB_View', cv2.WINDOW_NORMAL)
        #cv2.setWindowProperty('RGB_View', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
        cv2.imshow('RGB_View', color)
        depth_intrin = depth_frame.profile.as_video_stream_profile().intrinsics
        #flags, hcursor, (x, y) = win32gui.GetCursorInfo()
        x, y = win32api.GetCursorPos()
        #print(x, y)
        pixel_coord = [x, y]
        keypressed = win32api.GetKeyState(0x01)
        key = cv2.waitKey(1)
        # Press Esc or 'q' to close the image window
        if key & 0xFF == ord('q') or key == 27:
            cv2.destroyAllWindows()
            break
        # Calculate distance
        if keypressed != state_left:
            state_left = keypressed
            print(keypressed)
            if keypressed < 0:
                # NOTE: the condition here got cut off; a simple bounds check on the cursor position is assumed
                if 0 <= x and 0 <= y:
                    dist_to_center = depth_frame.get_distance(x, y)
                    dist_to_center = round(dist_to_center, 4)
                    depth_point = rs.rs2_deproject_pixel_to_point(depth_intrin, pixel_coord, dist_to_center)
                    depth_point = [i * 100 for i in depth_point]
                    depth_point = [round(i, 3) for i in depth_point]
                    #depth_point = (", ".join(repr(e) for e in depth_point))
                    with open('Coordinates.txt', 'w') as f:
                        f.write("%s\n" % depth_point)  # the with-block closes the file automatically
                    print(depth_point)
                    #print(depth_point[0:5], depth_point[7:12], depth_point[15:19])
                    print('The camera is facing an object:', dist_to_center, 'meters away')
                    Target.setPose(Offset(eye(), depth_point[0], depth_point[1], depth_point[2], 0, 0, 0))
                    robot.MoveL(Target)
                    time.sleep(0.7)
finally:
    pipe.stop()
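
In case it helps to narrow this down: my guess is that RoboDK runs the script with its own Python interpreter/environment, which does not have cv2, pyrealsense2, pywin32, etc. installed. A minimal check I can run from inside RoboDK (just standard-library calls, nothing RoboDK-specific is assumed) would be:
Code:
import sys

# Show which interpreter is executing the script and where it searches for packages.
# If this is not the environment where cv2 / pyrealsense2 / pywin32 were installed,
# the imports will fail exactly as in the attached error.
print("Interpreter:", sys.executable)
print("Module search path:")
for p in sys.path:
    print("   ", p)
If that points at a different interpreter, then either installing the missing packages into that environment with pip, or telling RoboDK to use the system Python where they are already installed (I believe there is an interpreter setting under Tools > Options), should make the imports work.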