diff --git a/_sources/lib/Vision.rst.txt b/_sources/lib/Vision.rst.txt index 736cc60..5c030d9 100644 --- a/_sources/lib/Vision.rst.txt +++ b/_sources/lib/Vision.rst.txt @@ -32,13 +32,14 @@ Using the Vision Module import cv2 from compLib import Vision - # get newest opencv frame from camera - frame = Vision.Streaming.get_frame() + while True: + # get newest opencv frame from camera + frame = Vision.Streaming.get_frame() - # do some processing with the frame..... + # do some processing with the frame..... - # publish frame to streaming server - Vision.Streaming.publish_frame(frame) + # publish frame to streaming server + Vision.Streaming.publish_frame(frame) Connect the raspberry pi to your internet and view the stream at: "http://your_raspi_ip:9898/". This should display your raspberry pi camera. Note: the stream will lag a little bit BUT the processing of the image will be done in @@ -68,22 +69,23 @@ For testing you can point it at this image: import cv2 from compLib import Vision - # get newest opencv frame from camera - frame = Vision.Streaming.get_frame() + while True: + # get newest opencv frame from camera + frame = Vision.Streaming.get_frame() - criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001) + criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001) - # convert image to grayscale image - gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) + # convert image to grayscale image + gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) - # find the chessboard corners - ret, corners = cv2.findChessboardCorners(gray, (6, 9), None) + # find the chessboard corners + ret, corners = cv2.findChessboardCorners(gray, (6, 9), None) - # draw detected chessboard position onto the image - cv2.drawChessboardCorners(frame, (6, 9), corners, ret) + # draw detected chessboard position onto the image - # publish frame to streaming server - Vision.Streaming.publish_frame(frame) + # publish frame to streaming server
+ Vision.Streaming.publish_frame(frame) Connect the raspberry pi to your internet and view the stream at: "http://your_raspi_ip:9898/". diff --git a/_static/pygments.css b/_static/pygments.css index 13b7243..631bc92 100644 --- a/_static/pygments.css +++ b/_static/pygments.css @@ -1,10 +1,5 @@ -pre { line-height: 125%; } -td.linenos pre { color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px; } -span.linenos { color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px; } -td.linenos pre.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } -span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } .highlight .hll { background-color: #ffffcc } -.highlight { background: #f8f8f8; } +.highlight { background: #f8f8f8; } .highlight .c { color: #408080; font-style: italic } /* Comment */ .highlight .err { border: 1px solid #FF0000 } /* Error */ .highlight .k { color: #008000; font-weight: bold } /* Keyword */ diff --git a/genindex.html b/genindex.html index f041b0c..25dd0b1 100644 --- a/genindex.html +++ b/genindex.html @@ -3,9 +3,9 @@
- + - +from compLib.Api import DoubleElim
position = DoubleElim.get_position()
-print(f"Position of my robot is: x={position.x}, y={position.y} and rotation is: {position.degrees}")
+print(f"Position of my robot is: x={position.x}, y={position.y} and rotation is: {position.degrees}")
goal = DoubleElim.get_goal()
-print(f"Goal is at: x={goal.x}, y={goal.y}")
+print(f"Goal is at: x={goal.x}, y={goal.y}")