Skip to content
Snippets Groups Projects
Commit 2578e1b9 authored by Yash Shah's avatar Yash Shah
Browse files

Initial commit

parents
No related branches found
No related tags found
No related merge requests found
# Raspberry Pi 4 Facial Recognition
Full Tutorial posted - https://www.tomshardware.com/how-to/raspberry-pi-facial-recognition
![RaspberryPi Facial Rec](https://github.com/carolinedunn/facial_recognition/blob/main/photo/screenshot.png)
Materials: Raspberry Pi 4 and Webcam
![RaspberryPi Facial Rec](https://github.com/carolinedunn/facial_recognition/blob/main/photo/webcamandRPi4.JPG)
Full Tutorial posted - https://www.tomshardware.com/how-to/raspberry-pi-facial-recognition
#! /usr/bin/python
# import the necessary packages
from imutils.video import VideoStream
from imutils.video import FPS
import face_recognition
import imutils
import pickle
import time
import cv2
# Import smtplib for the actual sending function
import smtplib
import subprocess
import time
# Here are the email package modules we'll need
from email.mime.application import MIMEApplication
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from subprocess import call
# --- Mail configuration ---
# SECURITY NOTE(review): credentials are hard-coded in source; move them to
# environment variables or a config file kept out of version control.
SMTP_USERNAME = "iotg16@outlook.com" # Mail id of the sender
SMTP_PASSWORD = "Group16!" # Password of the sender
SMTP_RECIPIENT = "yashshahq@gmail.com" # Mail id of the receiver
SMTP_SERVER = "smtp.office365.com" # Address of the SMTP server
SSL_PORT = 587  # Office365 submission port, used with STARTTLS below

# Initialize 'currentname' to trigger only when a new person is identified.
currentname = "unknown"
# Face encodings model created by train_model.py.
encodingsP = "encodings.pickle"

# Load the known faces and embeddings. A context manager closes the file
# handle deterministically (the original open(...).read() leaked it until GC).
print("[INFO] loading encodings + face detector...")
with open(encodingsP, "rb") as encodings_file:
    data = pickle.loads(encodings_file.read())

# Initialize the video stream and allow the camera sensor to warm up.
# src=0 : the built-in single webcam (e.g. a laptop webcam)
# src=2 : was needed to use a USB webcam attached to a laptop
# vs = VideoStream(src=2,framerate=10).start()
vs = VideoStream(usePiCamera=True).start()
time.sleep(2.0)

# start the FPS counter and the per-run counters
fps = FPS().start()
email_count = 0   # ensures at most one alert email is sent per run
frame_count = 0   # the main loop below exits after ~10 frames
# loop over frames from the video file stream
while True:
    # grab the frame from the threaded video stream and resize it
    # to 500px (to speed up processing)
    frame = vs.read()
    frame = imutils.resize(frame, width=500)
    # detect the face bounding boxes
    boxes = face_recognition.face_locations(frame)
    # compute the facial embeddings for each face bounding box
    encodings = face_recognition.face_encodings(frame, boxes)
    names = []
    # loop over the facial embeddings
    for encoding in encodings:
        # attempt to match each face in the input image to our known
        # encodings
        matches = face_recognition.compare_faces(data["encodings"],
            encoding)
        name = "Unknown"  # if face is not recognized, then print Unknown
        # check to see if we have found a match
        if True in matches:
            # find the indexes of all matched faces then count the
            # number of votes each known name received
            matchedIdxs = [i for (i, b) in enumerate(matches) if b]
            counts = {}
            for i in matchedIdxs:
                name = data["names"][i]
                counts[name] = counts.get(name, 0) + 1
            # recognized face = largest number of votes (on an unlikely
            # tie, max() keeps the first entry encountered)
            name = max(counts, key=counts.get)
            # print the name only when a new person is identified
            if currentname != name:
                currentname = name
                print(currentname)
        # update the list of names
        names.append(name)
    # loop over the recognized faces and draw their boxes/names (BGR color)
    for ((top, right, bottom, left), name) in zip(boxes, names):
        cv2.rectangle(frame, (left, top), (right, bottom),
            (0, 255, 225), 2)
        y = top - 15 if top - 15 > 15 else top + 15
        cv2.putText(frame, name, (left, y), cv2.FONT_HERSHEY_SIMPLEX,
            .8, (0, 255, 255), 2)
    # display the image to our screen
    cv2.imshow("Facial Recognition is Running", frame)
    # if any face was seen, send at most one alert email per run
    if names:
        if email_count == 0:
            cv2.imwrite('face_rec_frame.png', frame)
            email_count += 1
            # Skip sending if the system is halting or shutting down.
            # BUG FIX: Popen output is bytes in Python 3, so the original
            # `out[2] == "0"` compared an int to a str and was always
            # False; decode before indexing.
            p = subprocess.Popen(["runlevel"], stdout=subprocess.PIPE)
            out, err = p.communicate()
            runlevel_text = out.decode("ascii", "ignore")
            if len(runlevel_text) > 2 and runlevel_text[2] == "0":
                print("Halt detected")
                exit(0)
            if len(runlevel_text) > 2 and runlevel_text[2] == "6":
                print("Shutdown detected")
                exit(0)
            print("Connected to mail")
            # Create the container (outer) email message
            TO = SMTP_RECIPIENT
            FROM = SMTP_USERNAME
            msg = MIMEMultipart("alternative")
            msg["Subject"] = "We detected someone at your door."
            msg.preamble = "Rpi Sends image"
            # Attach the image; `with` closes the handle even on error
            with open("face_rec_frame.png", "rb") as fp:
                img = MIMEImage(fp.read())
            msg.attach(img)
            # Attach the zipped video recorded by the controller script
            with open("../video.zip", "rb") as file:
                msg.attach(MIMEApplication(file.read(), Name=names[0] + "_video.zip"))
            # Connect to the SMTP server (STARTTLS) and send the email
            server = smtplib.SMTP(SMTP_SERVER, SSL_PORT)
            server.ehlo()
            server.starttls()
            server.login(SMTP_USERNAME, SMTP_PASSWORD)
            server.sendmail(FROM, [TO], msg.as_string())
            server.quit()
            print("Mail sent successfully.")
    key = cv2.waitKey(1) & 0xFF
    # quit when 'q' key is pressed
    if key == ord("q"):
        break
    # process ~10 frames then exit (this script is re-launched per event
    # by the controller, which calls facial_req.py on each trigger)
    if frame_count <= 10:
        frame_count += 1
    if frame_count > 10:
        break
    # update the FPS counter
    fps.update()
# stop the timer and display FPS information
fps.stop()
print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
# cleanup: close the preview window and stop the camera thread
cv2.destroyAllWindows()
vs.stop()
#! /usr/bin/python
# import the necessary packages
from imutils.video import VideoStream
from imutils.video import FPS
import face_recognition
import imutils
import pickle
import time
import cv2
import requests
# Initialize 'currentname' to trigger only when a new person is identified.
currentname = "unknown"
# Known-face encodings created by train_model.py.
encodingsP = "encodings.pickle"
# Haar cascade XML used by the OpenCV face detector below.
cascade = "haarcascade_frontalface_default.xml"
# function for setting up emails
def send_message(name):
    """Email a visitor alert with image.jpg attached, via the Mailgun HTTP API.

    name -- the recognized visitor's name, interpolated into the message body.
    Returns the requests.Response from the Mailgun POST.
    """
    # BUG FIX: the original read the image with a bare open(...).read(),
    # leaking the file handle; a context manager closes it deterministically.
    with open("image.jpg", "rb") as image_file:
        attachment_bytes = image_file.read()
    return requests.post(
        "https://api.mailgun.net/v3/YOUR_DOMAIN_NAME/messages",
        auth=("api", "YOUR_API_KEY"),
        files=[("attachment", ("image.jpg", attachment_bytes))],
        data={"from": 'hello@example.com',
              "to": ["YOUR_MAILGUN_EMAIL_ADDRESS"],
              "subject": "You have a visitor",
              "html": "<html>" + name + " is at your door. </html>"})
# load the known faces and embeddings along with OpenCV's Haar
# cascade for face detection
print("[INFO] loading encodings + face detector...")
# NOTE(review): the open() handle here is never closed explicitly; CPython
# closes it at GC, but a `with` block would be cleaner.
data = pickle.loads(open(encodingsP, "rb").read())
detector = cv2.CascadeClassifier(cascade)
# initialize the video stream and allow the camera sensor to warm up
print("[INFO] starting video stream...")
#vs = VideoStream(src=0).start()
vs = VideoStream(usePiCamera=True).start()
time.sleep(2.0)
# start the FPS counter
fps = FPS().start()
# loop over frames from the video file stream
# (indentation reconstructed; the scraped copy had its structure flattened)
while True:
    # grab the frame from the threaded video stream and resize it
    # to 500px (to speed up processing)
    frame = vs.read()
    frame = imutils.resize(frame, width=500)
    # convert the input frame from (1) BGR to grayscale (for face
    # detection) and (2) from BGR to RGB (for face recognition)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    # detect faces in the grayscale frame
    rects = detector.detectMultiScale(gray, scaleFactor=1.1,
        minNeighbors=5, minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE)
    # OpenCV returns bounding boxes as (x, y, w, h) but face_recognition
    # wants (top, right, bottom, left), so reorder
    boxes = [(y, x + w, y + h, x) for (x, y, w, h) in rects]
    # compute the facial embeddings for each face bounding box
    encodings = face_recognition.face_encodings(rgb, boxes)
    names = []
    # loop over the facial embeddings
    for encoding in encodings:
        # attempt to match each face in the input image to our known
        # encodings
        matches = face_recognition.compare_faces(data["encodings"],
            encoding)
        name = "Unknown"
        # check to see if we have found a match
        if True in matches:
            # find the indexes of all matched faces then count the
            # number of votes each known name received
            matchedIdxs = [i for (i, b) in enumerate(matches) if b]
            counts = {}
            for i in matchedIdxs:
                name = data["names"][i]
                counts[name] = counts.get(name, 0) + 1
            # recognized face = largest number of votes (on an unlikely
            # tie, max() keeps the first entry encountered)
            name = max(counts, key=counts.get)
            # Alert only when a new person is identified
            if currentname != name:
                currentname = name
                print(currentname)
                # Take a picture to send in the email
                img_name = "image.jpg"
                cv2.imwrite(img_name, frame)
                print('Taking a picture.')
                # Now send me an email to let me know who is at the door
                request = send_message(name)
                print ('Status Code: '+format(request.status_code)) #200 status code means email sent successfully
        # update the list of names
        names.append(name)
    # loop over the recognized faces and draw their boxes/names (BGR color)
    for ((top, right, bottom, left), name) in zip(boxes, names):
        cv2.rectangle(frame, (left, top), (right, bottom),
            (0, 255, 225), 2)
        y = top - 15 if top - 15 > 15 else top + 15
        cv2.putText(frame, name, (left, y), cv2.FONT_HERSHEY_SIMPLEX,
            .8, (0, 255, 255), 2)
    # display the image to our screen
    cv2.imshow("Facial Recognition is Running", frame)
    key = cv2.waitKey(1) & 0xFF
    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break
    # update the FPS counter
    fps.update()
# stop the timer and display FPS information
fps.stop()
print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
# cleanup: close the preview window and stop the camera thread
cv2.destroyAllWindows()
vs.stop()
import cv2
import os
import shutil
import time
name = input("Enter the name of the person to be identified: ")
# Each person gets their own folder under dataset/; recreate it fresh so
# stale photos from a previous run are discarded.
person_dir = "dataset/" + name  # renamed: `dir` shadowed the builtin
if os.path.exists(person_dir):
    shutil.rmtree(person_dir)
# BUG FIX: os.makedirs also creates the parent "dataset" folder on first
# run; os.mkdir failed with FileNotFoundError if it did not exist yet.
os.makedirs(person_dir)
print("Directory created: " + name)
cam = cv2.VideoCapture(0)
cv2.namedWindow("press space to take a photo", cv2.WINDOW_NORMAL)
cv2.resizeWindow("press space to take a photo", 500, 300)
img_counter = 0
count = 0
# Capture 50 photos automatically, one every 0.25 seconds.
while count < 50:
    ret, frame = cam.read()
    if not ret:
        print("Failed to grab frame")
        break
    # BUG FIX: reuse the pre-sized window created above; the original passed
    # a different title here, which opened a second, unsized window and left
    # the resized one blank.
    cv2.imshow("press space to take a photo", frame)
    # waitKey is required for HighGUI to actually repaint the window
    cv2.waitKey(1)
    img_name = "dataset/"+ name +"/image_{}.jpg".format(img_counter)
    cv2.imwrite(img_name, frame)
    print("{} written!".format(img_name))
    img_counter += 1
    count += 1
    time.sleep(0.25)
cam.release()
cv2.destroyAllWindows()
import cv2
from picamera import PiCamera
from picamera.array import PiRGBArray
name = 'Yash_1' #replace with your name
cam = PiCamera()
cam.resolution = (512, 304)
cam.framerate = 10
rawCapture = PiRGBArray(cam, size=(512, 304))
img_counter = 0
# Stream frames continuously; SPACE saves a photo, ESC exits.
# BUG FIX: removed the redundant outer `while True` (it only restarted the
# capture generator after the inner break before a second ESC check ended
# it) and the duplicated rawCapture.truncate(0) call.
for frame in cam.capture_continuous(rawCapture, format="bgr", use_video_port=True):
    image = frame.array
    cv2.imshow("Press Space to take a photo", image)
    k = cv2.waitKey(1)
    # clear the stream buffer so the next frame can be captured
    rawCapture.truncate(0)
    if k % 256 == 27:
        # ESC pressed
        print("Escape hit, closing...")
        break
    elif k % 256 == 32:
        # SPACE pressed — save a headshot into this person's dataset folder
        # (assumes dataset/<name>/ already exists — TODO confirm / create it)
        img_name = "dataset/"+ name +"/image_{}.jpg".format(img_counter)
        cv2.imwrite(img_name, image)
        print("{} written!".format(img_name))
        img_counter += 1
cv2.destroyAllWindows()
#! /usr/bin/python
# Imports
import requests
def send_simple_message():
    """POST a generic visitor-alert email through the Mailgun HTTP API.

    Returns the requests.Response so the caller can inspect status/body.
    """
    print("I am sending an email.")
    # BUG FIX: the Mailgun messages API requires a "to" recipient; without
    # it the request is rejected, so no email was ever delivered. Added the
    # same placeholder recipient used by send_message in facial_req.py.
    return requests.post(
        "https://api.mailgun.net/v3/YOUR_DOMAIN_NAME/messages",
        auth=("api", "YOUR_API_KEY"),
        data={"from": 'hello@example.com',
              "to": ["YOUR_MAILGUN_EMAIL_ADDRESS"],
              "subject": "Visitor Alert",
              "html": "<html> Your Raspberry Pi recognizes someone. </html>"})

request = send_simple_message()
print ('Status: '+format(request.status_code))
print ('Body:'+ format(request.text))
#! /usr/bin/python
# import the necessary packages
from imutils import paths
import face_recognition
#import argparse
import pickle
import cv2
import os
# our images are located in the dataset folder (one subfolder per person)
print("[INFO] start processing faces...")
imagePaths = list(paths.list_images("dataset"))
# initialize the parallel lists of known encodings and known names
knownEncodings = []
knownNames = []
# loop over the image paths
# (indentation reconstructed; the scraped copy had its structure flattened)
for (i, imagePath) in enumerate(imagePaths):
    # extract the person name from the image path: dataset/<name>/<file>
    print("[INFO] processing image {}/{}".format(i + 1,
        len(imagePaths)))
    name = imagePath.split(os.path.sep)[-2]
    # load the input image and convert it from BGR (OpenCV ordering)
    # to dlib ordering (RGB)
    image = cv2.imread(imagePath)
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    # detect the (x, y)-coordinates of the bounding boxes
    # corresponding to each face in the input image
    boxes = face_recognition.face_locations(rgb,
        model="hog")
    # compute the facial embedding for each detected face
    encodings = face_recognition.face_encodings(rgb, boxes)
    # record each encoding with the person's name
    for encoding in encodings:
        knownEncodings.append(encoding)
        knownNames.append(name)
# dump the facial encodings + names to disk; the `with` block replaces the
# original open/write/close triple and guarantees the file is closed even
# if the dump raises.
print("[INFO] serializing encodings...")
data = {"encodings": knownEncodings, "names": knownNames}
with open("encodings.pickle", "wb") as f:
    f.write(pickle.dumps(data))
--- File: dht2.py (new file, mode 100644) ---
"""
/***************************************************************************
* Sketch Name: Lab1_code1
* Description: Arduino - Grove_Pi_Sensors
* Parameters: PIR, Light, Button, LED
* Return: Dark, Light, Movement, Watching
* Copyright: Following code is written for educational purposes by Cardiff University.
* Latest Version: 07/09/2021 (by Hakan KAYAN)
***************************************************************************/
"""
import time
import sys
import os
import grovepi
import math
import json
# Connect the Grove Temperature & Humidity Sensor Pro to digital port D4
# This example uses the blue colored sensor.
# Pinout: SIG,NC,VCC,GND
sensor = 4 # The Temperature and Humidity Sensor should be connected to port D4
# temp_humidity_sensor_type — second argument to grovepi.dht()
# Grove Base Kit comes with the blue sensor.
blue = 0 # The Blue colored sensor.
white = 1 # The White colored sensor (unused here, kept for reference).
def temp():
    """Read the Grove DHT sensor once.

    Returns a JSON string {"temperature": ..., "humidity": ...} for a valid
    reading, or None (after printing "No data.") when the sensor returned NaN.
    Exits the process on Ctrl-C.
    """
    try:
        # First parameter is the port, second is the sensor type (blue).
        [temp, humidity] = grovepi.dht(sensor, blue)
        # FIX: serialize only after validating. The original json.dumps-ed
        # first, doing wasted work on bad readings and producing the
        # non-standard token `NaN` in the (discarded) JSON.
        if math.isnan(temp) is False and math.isnan(humidity) is False:
            print("we are here")
            return json.dumps({'temperature': temp, 'humidity': humidity})
        else:
            print("No data.")
    except KeyboardInterrupt:
        print("Terminated.")
        os._exit(0)
--- File: lcd.py (new file, mode 100644) ---
"""
/***************************************************************************
* Sketch Name: Lab1_code1
* Description: Arduino - Grove_Pi_Sensors
* Parameters: PIR, Light, Button, LED
* Return: Dark, Light, Movement, Watching
* Copyright: Following code is written for educational purposes by Cardiff University.
* Latest Version: 05/08/2021 (by Hakan KAYAN)
* Modified from: https://github.com/DexterInd/GrovePi.git
***************************************************************************/
"""
import time
import sys
import os
import grovepi
import math
import json
sensor = 4 # Connect the Sensor to port I2C-2.
blue = 0 # The Blue colored sensor.
# Select the SMBus implementation appropriate for the platform.
if sys.platform == 'uwp':
    # Windows 10 IoT Core uses the winrt smbus shim.
    import winrt_smbus as smbus
    bus = smbus.SMBus(1)
else:
    import smbus
    import RPi.GPIO as GPIO
    rev = GPIO.RPI_REVISION
    # Board revisions 2/3 expose I2C on bus 1; earlier boards use bus 0.
    if rev == 2 or rev == 3:
        bus = smbus.SMBus(1)
    else:
        bus = smbus.SMBus(0)
# this device has two I2C addresses: one for the RGB backlight controller
# and one for the text (LCD) controller
DISPLAY_RGB_ADDR = 0x62
DISPLAY_TEXT_ADDR = 0x3e

# set backlight to (R,G,B) (values from 0..255 for each)
def setRGB(r, g, b):
    # NOTE(review): register sequence follows the Grove reference driver;
    # presumed order-sensitive (controller setup before channel loads).
    bus.write_byte_data(DISPLAY_RGB_ADDR, 0, 0)
    bus.write_byte_data(DISPLAY_RGB_ADDR, 1, 0)
    bus.write_byte_data(DISPLAY_RGB_ADDR, 0x08, 0xaa)
    bus.write_byte_data(DISPLAY_RGB_ADDR, 4, r)  # red channel
    bus.write_byte_data(DISPLAY_RGB_ADDR, 3, g)  # green channel
    bus.write_byte_data(DISPLAY_RGB_ADDR, 2, b)  # blue channel
# send command to display (no need for external use)
def textCommand(cmd):
    # 0x80 is the control byte used for commands (0x40 is used for data;
    # see setText below)
    bus.write_byte_data(DISPLAY_TEXT_ADDR, 0x80, cmd)
# set display text \n for second line(or auto wrap)
def setText(text):
    # Writes `text` to the 16x2 LCD: clears the display, re-enables it,
    # then streams characters, moving to line 2 on '\n' or after 16 chars.
    # Anything past two lines is dropped.
    textCommand(0x01) # clear display
    time.sleep(.05)
    textCommand(0x08 | 0x04) # display on, no cursor
    textCommand(0x28) # 2 lines
    time.sleep(.05)
    count = 0  # characters written on the current line
    row = 0    # current line (0 or 1)
    for c in text:
        if c == '\n' or count == 16:
            # line break requested or line full: advance to the next row
            count = 0
            row += 1
            if row == 2:
                break  # only two rows on this display
            textCommand(0xc0)  # move cursor to the start of line 2
            if c == '\n':
                continue  # newline itself is not printed
        count += 1
        # 0x40 control byte = data write (print the character)
        bus.write_byte_data(DISPLAY_TEXT_ADDR, 0x40, ord(c))
# example code: read the DHT sensor once and show the temperature on the LCD
if __name__ == "__main__":
    setRGB(0, 128, 64)
    time.sleep(2)
    try:
        # This example uses the blue colored sensor.
        # The first parameter is the port, the second parameter is the type of sensor.
        [temp, humidity] = grovepi.dht(sensor, blue)
        time.sleep(1)
        x = json.dumps(temp)
        y = json.dumps(humidity)
        # Only display/print valid (non-NaN) readings.
        # NOTE(review): only the temperature (x) is shown on the LCD;
        # humidity is printed to stdout only.
        if math.isnan(temp) is False and math.isnan(humidity) is False:
            setText(x)
            time.sleep(0.1)
            print('The temperature today is ' + x + ' degrees, enjoy!')
            print('The humidity today is ' + y + '%!')
    except KeyboardInterrupt:
        print("Terminated.")
        os._exit(0)
# Import smtplib for the actual sending function
import smtplib
import subprocess
import time
# Here are the email package modules we'll need
from email.mime.application import MIMEApplication
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from subprocess import call
import grovepi
import cv2
import os
from dht2 import temp
from zipper import zipper
import re
print("System Working")
# Runs the lcd file which displays the temperature and humidity
call("python3 lcd.py", shell=True)
# Hardware components that need to be connected (GrovePi digital ports)
led_status = 3 # Connect LED Button to port D3
buzzer = 5 # Connect Buzzer to port D5
ranger = 7 # Connect ranger to Port D7
# Setting up Mail
# SECURITY NOTE(review): credentials are hard-coded in source; move to
# environment variables or a secrets file kept out of version control.
# NOTE(review): these SMTP_* values appear unused in this script — the
# facial_req.py child process defines its own copies; verify before removal.
SMTP_USERNAME = "iotg16@outlook.com" # Mail id of the sender
SMTP_PASSWORD = "Group16!" # Password of the sender
SMTP_RECIPIENT = input("Please enter the email address the security footage should be sent to: ") # Mail id of the receiver
SMTP_SERVER = "smtp.office365.com" # Address of the SMTP server
SSL_PORT = 587
# Ask once at startup whether to capture photos of a new person; loop until
# the user enters exactly "0" or "1". (Replaces the original
# `while checker == True` boolean-flag idiom with a plain loop + break.)
while True:
    print()
    print("The process of adding photos of someone can only be done during startup")
    print()
    face_req = input(
        "Would you like to add photos of someone? No: 0, Yes: 1): "
    )
    if face_req == "1" or face_req == "0":
        break
    print("Invalid Input. Must be 0, 1")
if face_req == "1":
    # NOTE(review): shell=True with a fixed command string; acceptable here
    # since no user input is interpolated into the command.
    call("cd facial_recognition; python3 headshots.py", shell=True)
# Main monitoring loop. GrovePi reads occasionally raise IOError, so the
# whole loop is wrapped in try/except and retried on that error.
sustained = False
sustained_track = 0
while True:
    try:
        while True:
            # Require >250 consecutive close-range readings (~3 seconds)
            # before triggering, so random motion does not set off the
            # security features.
            while sustained == False:
                ur_distance = grovepi.ultrasonicRead(ranger)
                if ur_distance < 100:
                    sustained_track += 1
                    print(sustained_track)
                    print(ur_distance)
                    if sustained_track > 250:
                        sustained = True
                else:
                    sustained_track = 0
            if ur_distance < 100:  # runs only if distance is less than 100 cm
                # Read (and implicitly print) temperature and humidity.
                x = temp()
                # Status LED on: someone has arrived / movement detected.
                grovepi.digitalWrite(led_status, 1)
                # Sound the buzzer for one second.
                print("Buzzer Activated")
                grovepi.analogWrite(buzzer, 1)
                time.sleep(1)
                grovepi.analogWrite(buzzer, 0)
                print("Buzzer Stopped")
                # Record a 6-second video from the Pi camera and zip it so
                # facial_req.py can attach it to the alert email.
                call(["raspivid -o myvid.h264 -w 640 -h 480 -t 6000"], shell=True)
                zipper()
                grovepi.digitalWrite(led_status, 0)  # Turn off the LED
                # Run facial recognition on the visitor.
                command = "cd facial_recognition; python3 facial_req.py"
                call(command, shell=True)
            # BUG FIX: re-arm the detector. The original never reset
            # `sustained`, so after the first trigger this inner loop re-ran
            # the alarm/recording forever on a stale distance reading.
            sustained = False
            sustained_track = 0
    except IOError:
        print("Error")
        continue
from zipfile import ZipFile
def zipper():
    """Zip the recorded video (myvid.h264) into video.zip for emailing.

    Overwrites any existing video.zip in the current directory.
    """
    # Context manager guarantees the archive is finalized and closed even
    # if the write raises (the original left it open on error).
    with ZipFile('video.zip', 'w') as zip_obj:
        zip_obj.write('myvid.h264')
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment