Control Raspberry Pi GPIO Pins from Python
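The listing below is the full control script: MediaPipe Face Mesh estimates head pose from the camera, a pair of IR sensors and a rotary encoder (CLK/DT plus its push switch SW) provide two more input modes, and whichever mode is active is translated into a step count and direction for a stepper motor driven through what appears to be a STEP/DIR driver on the STP and DIR pins. Two further inputs (IRChannel, visionChannel) toggle the IR and vision modes via GPIO interrupts. All pin numbers are left at 0 as placeholders, so they must be set to real BOARD pin numbers before the script will run.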
import cv2
import mediapipe as mp
import numpy as np
import time
import RPi.GPIO as GPIO
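# cv2, mediapipe and numpy handle the head-pose estimation on camera frames,
# time paces the step pulses and the FPS counter, and RPi.GPIO drives the
# stepper driver and reads the IR sensors, rotary encoder and mode switches.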
# variable definitions
total_step = 0
step_num = 0
counter_before = 0
counter_now = 0
counter = 0
currentStateCLK = 0
lastStateCLK = 0
dir = 0
# mode define
rotationMode = "stop"
encoderOnOff = "Off"
visionOnOff = "Off"
IROnOff = "Off"
workMode = "None"
# channel definitions (BOARD pin numbers; 0 is only a placeholder -- set each
# one to the pin actually used in your wiring before running)
DIR = 0
STP = 0
EN = 0
IR_R = 0
IR_L = 0
SW = 0
DT = 0
CLK = 0
IRChannel = 0
visionChannel = 0
# signal setup
GPIO.setmode(GPIO.BOARD)
GPIO.setup(STP, GPIO.OUT)
GPIO.setup(DIR, GPIO.OUT)
GPIO.setup(IR_R, GPIO.IN)
GPIO.setup(IR_L, GPIO.IN)
GPIO.setup(CLK, GPIO.IN)
GPIO.setup(DT, GPIO.IN)
GPIO.setup(SW, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(IRChannel, GPIO.IN)
GPIO.setup(visionChannel, GPIO.IN)
lastStateCLK = GPIO.input(CLK)
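# Assumed wiring, going by the names above: STP/DIR feed a STEP/DIR stepper
# driver, IR_R/IR_L are digital IR sensor outputs, CLK/DT/SW come from a rotary
# encoder (SW is its push switch, held low by the internal pull-down), and
# IRChannel/visionChannel are mode-select buttons. EN is declared but never
# configured or driven, so the driver's enable pin is presumably handled in
# hardware.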
# define functions
def IRSensing() :
    # Read both IR sensors; when only one side sees a signal, queue 200 steps
    # toward that side via the globals consumed by Drive().
    global step_num, dir
    IRSignalLeft = GPIO.input(IR_L)
    IRSignalRight = GPIO.input(IR_R)
    if IRSignalRight == GPIO.LOW and IRSignalLeft == GPIO.HIGH :
        step_num = 200
        dir = GPIO.LOW
    elif IRSignalRight == GPIO.HIGH and IRSignalLeft == GPIO.LOW :
        step_num = 200
        dir = GPIO.HIGH
def Drive(stepVariable, directionVariable) :
    # Pulse the STP pin stepVariable times (1 ms high / 1 ms low) in the
    # requested direction, and keep a running total of steps moved.
    global total_step
    GPIO.output(DIR, directionVariable)
    i = 0
    while True:
        GPIO.output(STP, GPIO.HIGH)
        time.sleep(0.001)
        GPIO.output(STP, GPIO.LOW)
        time.sleep(0.001)
        i = i + 1
        if i > stepVariable :
            break
    if directionVariable == GPIO.HIGH :
        total_step = total_step + stepVariable
    else :
        total_step = total_step - stepVariable
# interrupt callbacks (RPi.GPIO passes the triggering channel to each callback)
def encoderCount(channel) :
    # Count encoder detents: on a rising CLK edge, DT tells us the direction.
    global counter_now, lastStateCLK
    currentStateCLK = GPIO.input(CLK)
    if currentStateCLK != lastStateCLK and currentStateCLK == GPIO.HIGH :
        if GPIO.input(DT) != currentStateCLK :
            counter_now = counter_now + 1
        else:
            counter_now = counter_now - 1
    lastStateCLK = currentStateCLK
def encoderSwitch(channel) :
    # Encoder push switch: arm the encoder-driven move for the main loop.
    global encoderOnOff
    encoderOnOff = "On"
def visionSwitch(channel) :
    # Toggle vision-driven (head pose) control on and off.
    global visionOnOff
    if visionOnOff == "Off" :
        visionOnOff = "On"
    else :
        visionOnOff = "Off"
def IRSwitch(channel) :
    # Toggle IR-sensor-driven control on and off.
    global IROnOff
    if IROnOff == "Off" :
        IROnOff = "On"
    else :
        IROnOff = "Off"
# register the GPIO interrupts
GPIO.add_event_detect(IRChannel, GPIO.RISING, callback=IRSwitch, bouncetime=200)
GPIO.add_event_detect(visionChannel, GPIO.RISING, callback=visionSwitch, bouncetime=200)
GPIO.add_event_detect(SW, GPIO.RISING, callback=encoderSwitch, bouncetime=200)
GPIO.add_event_detect(CLK, GPIO.RISING, callback=encoderCount)
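# add_event_detect registers edge callbacks that RPi.GPIO runs in a background
# thread (sequentially, not concurrently) whenever the pin sees a rising edge.
# bouncetime=200 ignores repeat edges within 200 ms -- simple debouncing for
# the push buttons -- while the CLK callback is left undebounced so encoder
# pulses are not dropped.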
# vision setup
mp_face_mesh = mp.solutions.face_mesh
face_mesh = mp_face_mesh.FaceMesh(min_detection_confidence=0.5, min_tracking_confidence=0.5)
mp_drawing = mp.solutions.drawing_utils
drawing_spec = mp_drawing.DrawingSpec(thickness=1, circle_radius=1)
cap = cv2.VideoCapture(0)
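# Vision pipeline: FaceMesh with 0.5 detection/tracking confidence, landmarks
# drawn with thin 1-px specs, and frames pulled from camera index 0. Change the
# VideoCapture index if the Pi camera or USB webcam enumerates differently.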
# WORK
while cap.isOpened():
    success, image = cap.read()
    if not success:
        # skip the occasional empty frame from the camera
        continue
    start = time.time()
    # Flip the image horizontally for a later selfie-view display
    # Also convert the color space from BGR to RGB
    image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)
    # To improve performance
    image.flags.writeable = False
    # Get the result
    results = face_mesh.process(image)
    # To improve performance
    image.flags.writeable = True
    # Convert the color space from RGB to BGR
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    img_h, img_w, img_c = image.shape
    face_3d = []
    face_2d = []
    if results.multi_face_landmarks:
        for face_landmarks in results.multi_face_landmarks:
            for idx, lm in enumerate(face_landmarks.landmark):
                if idx == 33 or idx == 263 or idx == 1 or idx == 61 or idx == 291 or idx == 199:
                    if idx == 1:
                        nose_2d = (lm.x * img_w, lm.y * img_h)
                        nose_3d = (lm.x * img_w, lm.y * img_h, lm.z * 3000)
                    x, y = int(lm.x * img_w), int(lm.y * img_h)
                    # Get the 2D Coordinates
                    face_2d.append([x, y])
                    # Get the 3D Coordinates
                    face_3d.append([x, y, lm.z])
            # Convert it to the NumPy array
            face_2d = np.array(face_2d, dtype=np.float64)
            # Convert it to the NumPy array
            face_3d = np.array(face_3d, dtype=np.float64)
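            # face_2d / face_3d now hold the six landmarks kept above
            # (indices 33, 263, 1, 61, 291, 199: roughly the eye corners,
            # nose tip, mouth corners and chin), which is enough for solvePnP
            # to recover the head's rotation and translation.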
            # The camera matrix
            focal_length = 1 * img_w
            cam_matrix = np.array([ [focal_length, 0, img_h / 2],
                                    [0, focal_length, img_w / 2],
                                    [0, 0, 1]])
            # The distortion parameters
            dist_matrix = np.zeros((4, 1), dtype=np.float64)
            # Solve PnP
            success, rot_vec, trans_vec = cv2.solvePnP(face_3d, face_2d, cam_matrix, dist_matrix)
            # Get rotational matrix
            rmat, jac = cv2.Rodrigues(rot_vec)
            # Get angles
            angles, mtxR, mtxQ, Qx, Qy, Qz = cv2.RQDecomp3x3(rmat)
            # Get the y rotation degree
            x = angles[0] * 360
            y = angles[1] * 360
            z = angles[2] * 360
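            # cv2.Rodrigues expands the rotation vector from solvePnP into a
            # 3x3 matrix and RQDecomp3x3 factors it into per-axis angles; only
            # the y component (left/right turn) matters for the motor, and the
            # *360 scaling just puts the values in a range that suits the +-7
            # and +-20 thresholds used next.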
            # See which way the user's head is turned
            if -20 < y <= -7:
                text = "Looking Left little"
            elif 20 > y >= 7:
                text = "Looking Right little"
            elif -20 > y :
                text = "Looking Left"
                rotationMode = "rotating left"
            elif 20 < y :
                text = "Looking Right"
                rotationMode = "rotating right"
            else:
                text = "Forward"
                rotationMode = "stop"
            # Display the nose direction
            nose_3d_projection, jacobian = cv2.projectPoints(nose_3d, rot_vec, trans_vec, cam_matrix, dist_matrix)
            p1 = (int(nose_2d[0]), int(nose_2d[1]))
            p2 = (int(nose_2d[0] + y * 10) , int(nose_2d[1] - x * 10))
            cv2.line(image, p1, p2, (255, 0, 0), 3)
            # Add the text on the image
            cv2.putText(image, text, (20, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 2)
            cv2.putText(image, "x: " + str(np.round(x,2)), (500, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
            cv2.putText(image, "y: " + str(np.round(y,2)), (500, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
            cv2.putText(image, "z: " + str(np.round(z,2)), (500, 150), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
            cv2.putText(image, rotationMode, (500, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
            cv2.putText(image, workMode, (500, 250), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
            end = time.time()
            totalTime = end - start
            fps = 1 / totalTime
            #print("FPS: ", fps)
            cv2.putText(image, f'FPS: {int(fps)}', (20,450), cv2.FONT_HERSHEY_SIMPLEX, 1.5, (0,255,0), 2)
            mp_drawing.draw_landmarks(
                image=image,
                landmark_list=face_landmarks,
                connections=mp_face_mesh.FACEMESH_TESSELATION,
                landmark_drawing_spec=drawing_spec,
                connection_drawing_spec=drawing_spec)
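    # From here on, the frame's result is turned into motor motion. The On/Off
    # flags checked below are flipped asynchronously by the GPIO callbacks, and
    # rotationMode was set by the head-pose branch above.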
    # drive work
    if encoderOnOff == "On" :
        workMode = "Switch"
        counter = counter_now - counter_before
        counter_before = counter_now
        if counter > 0 :
            step_num = counter*2000
            dir = GPIO.HIGH
        elif counter < 0 :
            step_num = counter*(-2000)
            dir = GPIO.LOW
        if counter != 0 :
            Drive(step_num, dir)
        workMode = "None"
        encoderOnOff = "Off"
    if IROnOff == "On":
        while True :
            step_num = 0
            IRSensing()
            if step_num != 0 :
                workMode = "IR"
                Drive(step_num, dir)
            elif step_num == 0 or IROnOff == "Off" :
                workMode = "None"
                break
    if visionOnOff == "On" :
        workMode = "vision"
        step_num = 200
        if rotationMode == "rotating right" :
            dir = GPIO.HIGH
            Drive(step_num, dir)
        elif rotationMode == "rotating left" :
            dir = GPIO.LOW
            Drive(step_num, dir)
        elif rotationMode == "stop" :
            workMode = "None"
    cv2.imshow('Head Pose Estimation', image)
    # press H (Shift+h, ASCII 72) in the preview window to exit
    if cv2.waitKey(5) & 0xFF == 72:
        break
# release the camera, close the window and free the GPIO lines
cap.release()
cv2.destroyAllWindows()
GPIO.cleanup()
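To run it, copy the listing onto the Pi under any filename, fill in the BOARD pin numbers at the top, make sure opencv-python, mediapipe and RPi.GPIO are installed (mediapipe on a Pi may need a prebuilt ARM wheel), then start it with python3 while the camera is attached. The window titled Head Pose Estimation shows the pose readout and the current work mode, and pressing H (Shift+h) closes it.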