import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
from zipfile import ZipFile
from urllib.request import urlretrieve
from IPython.display import YouTubeVideo, display, Image
%matplotlib inline
Download Assets
Pose Estimation - OpenPose
In this notebook we will perform real-time multi-person 2D pose estimation using Part Affinity Fields. The model accepts an image of one or more people, detects keypoints at the major joints of the human anatomy, and logically connects those keypoints, producing a part affinity map and a confidence (probability) map.
Historically, pose estimation struggled when clothing obscured the joints, when joints were hidden from view, and when a joint had to be associated with the correct person.
We will use the OpenPose Caffe model, which was trained on a multi-person image dataset. Pose estimation is most useful on video clips, which we will cover as well.
Setup
def download_and_unzip(url, save_path):
    print(f"Downloading and extracting assets....", end="")

    # Download the zip file using the urllib package.
    urlretrieve(url, save_path)

    try:
        # Extract the ZIP file contents into the same directory using the zipfile package.
        with ZipFile(save_path) as z:
            z.extractall(os.path.split(save_path)[0])
        print("Done")
    except Exception as e:
        print("\nInvalid file.", e)
URL = r"https://www.dropbox.com/s/089r2yg6aao858l/opencv_bootcamp_assets_NB14.zip?dl=1"
asset_zip_path = os.path.join(os.getcwd(), "opencv_bootcamp_assets_NB14.zip")

# Download if the asset ZIP does not exist.
if not os.path.exists(asset_zip_path):
    download_and_unzip(URL, asset_zip_path)
Load Caffe Model
A typical Caffe model has two files:
- Architecture: defined in a .prototxt file
- Weights: defined in a .caffemodel file
protoFile = "pose_deploy_linevec_faster_4_stages.prototxt"
weightsFile = os.path.join("model", "pose_iter_160000.caffemodel")
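Before loading the network it can help to confirm that both files are actually present; a missing weights file is a common source of cryptic readNetFromCaffe errors. This check is a small optional addition, not part of the original notebook, and assumes the relative paths above match your directory layout:
# Optional sanity check: warn if either model file is missing.
for model_file in (protoFile, weightsFile):
    if not os.path.exists(model_file):
        print(f"Missing model file: {model_file} -- re-run the asset download above.")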
- Specify the number of points in the model
- List the pairings of the joints by their indices
- These pairs are the linkages of the joints of the human anatomy (human-readable names for the indices are sketched after the code below)
nPoints = 15
POSE_PAIRS = [
    [0, 1],
    [1, 2],
    [2, 3],
    [3, 4],
    [1, 5],
    [5, 6],
    [6, 7],
    [1, 14],
    [14, 8],
    [8, 9],
    [9, 10],
    [14, 11],
    [11, 12],
    [12, 13],
]
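For readability it can help to keep human-readable names next to these indices. The labels below follow the ordering commonly used with this 15-point (MPII-style) OpenPose model; they are an illustrative assumption and are not defined anywhere in the original notebook:
# Assumed index-to-body-part labels for the 15-point model (for illustration only).
KEYPOINT_NAMES = [
    "Head", "Neck", "R-Shoulder", "R-Elbow", "R-Wrist",
    "L-Shoulder", "L-Elbow", "L-Wrist", "R-Hip", "R-Knee",
    "R-Ankle", "L-Hip", "L-Knee", "L-Ankle", "Chest",
]

# Example: print each entry of POSE_PAIRS as a readable limb.
for a, b in POSE_PAIRS:
    print(f"{KEYPOINT_NAMES[a]} -> {KEYPOINT_NAMES[b]}")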
# Load the Caffe model and keep an instance of it in net
net = cv2.dnn.readNetFromCaffe(protoFile, weightsFile)
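If inference feels slow, the cv2.dnn API also lets you choose a preferable backend and target for the network. The calls below are standard OpenCV DNN functions; whether an accelerated target actually helps depends on how your OpenCV build was compiled, so treat this as an optional tweak:
# Optional: run on the default OpenCV backend and the CPU target.
net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
# Builds with OpenCL support can try cv2.dnn.DNN_TARGET_OPENCL instead.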
Read Image
# Read the image, then convert from BGR to RGB for display with matplotlib
im = cv2.imread("Tiger_Woods_crop.png")
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)

# Acquire the image dimensions dynamically
inWidth = im.shape[1]
inHeight = im.shape[0]
Preview Image
Image(filename="Tiger_Woods.png")
Convert Image to Blob
netInputSize = (368, 368)
inpBlob = cv2.dnn.blobFromImage(im, 1.0 / 255, netInputSize, (0, 0, 0), swapRB=True, crop=False)
net.setInput(inpBlob)
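blobFromImage scales the pixel values by 1/255, resizes the image to 368 x 368, swaps the first and last channels, and returns a 4-D tensor in NCHW order. A quick shape check makes that layout explicit:
# The blob is laid out as (batch, channels, height, width).
print(inpBlob.shape)  # expected: (1, 3, 368, 368)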
Run Inference
# Forward pass
output = net.forward()

# Display probability maps
plt.figure(figsize=(20, 5))
for i in range(nPoints):
    probMap = output[0, i, :, :]
    displayMap = cv2.resize(probMap, (inWidth, inHeight), cv2.INTER_LINEAR)

    plt.subplot(2, 8, i + 1)
    plt.axis("off")
    plt.imshow(displayMap, cmap="jet")
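The raw network output is also a 4-D tensor: the first nPoints channels are the keypoint confidence maps visualized above, followed by background and Part Affinity Field channels, all at a reduced spatial resolution. That reduced resolution is why each map is resized before display and why scale factors are needed in the next step. Printing the shape confirms this; the exact channel count depends on the model variant:
# output shape: (1, num_channels, map_height, map_width)
print(output.shape)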
Extract Points
# X and Y scale factors from the network output resolution to the image resolution
scaleX = inWidth / output.shape[3]
scaleY = inHeight / output.shape[2]

# Empty list to store the detected keypoints
points = []

# Confidence threshold
threshold = 0.1

for i in range(nPoints):
    # Obtain the probability map for keypoint i
    probMap = output[0, i, :, :]

    # Find the global maximum of the probMap (minMaxLoc returns min value, max value, min location, max location).
    minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)

    # Scale the point to fit on the original image
    x = scaleX * point[0]
    y = scaleY * point[1]

    if prob > threshold:
        # Add the point to the list if the probability is greater than the threshold
        points.append((int(x), int(y)))
    else:
        points.append(None)
Display Points & Skeleton
imPoints = im.copy()
imSkeleton = im.copy()

# Draw points
for i, p in enumerate(points):
    # Skip keypoints that fell below the detection threshold
    if p is None:
        continue
    cv2.circle(imPoints, p, 8, (255, 255, 0), thickness=-1, lineType=cv2.FILLED)
    cv2.putText(imPoints, "{}".format(i), p, cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2, lineType=cv2.LINE_AA)

# Draw skeleton
for pair in POSE_PAIRS:
    partA = pair[0]
    partB = pair[1]

    if points[partA] and points[partB]:
        cv2.line(imSkeleton, points[partA], points[partB], (255, 255, 0), 2)
        cv2.circle(imSkeleton, points[partA], 8, (255, 0, 0), thickness=-1, lineType=cv2.FILLED)

# Plot
plt.figure(figsize=(50, 50))

plt.subplot(121)
plt.axis("off")
plt.imshow(imPoints)

plt.subplot(122)
plt.axis("off")
plt.imshow(imSkeleton)
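As mentioned in the introduction, pose estimation is most useful on video. One way to extend the same pipeline to a clip is to repeat the blob / forward / extract steps once per frame. The sketch below is illustrative only: it assumes a local file named input_video.mp4 (a hypothetical name), reuses net, nPoints, and threshold from above, and uses cv2.imshow, which works when run as a script rather than inline in the notebook:
cap = cv2.VideoCapture("input_video.mp4")  # hypothetical filename

while cap.isOpened():
    ok, frame = cap.read()
    if not ok:
        break

    frameHeight, frameWidth = frame.shape[:2]

    # Same preprocessing as the single-image case: BGR -> RGB, then blobFromImage.
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    blob = cv2.dnn.blobFromImage(rgb, 1.0 / 255, (368, 368), (0, 0, 0), swapRB=True, crop=False)
    net.setInput(blob)
    out = net.forward()

    # Extract and draw the keypoints exactly as above.
    for i in range(nPoints):
        probMap = out[0, i, :, :]
        _, prob, _, point = cv2.minMaxLoc(probMap)
        if prob > threshold:
            x = int(frameWidth * point[0] / out.shape[3])
            y = int(frameHeight * point[1] / out.shape[2])
            cv2.circle(frame, (x, y), 8, (0, 255, 255), thickness=-1, lineType=cv2.FILLED)

    cv2.imshow("Pose", frame)
    if cv2.waitKey(1) & 0xFF == 27:  # press Esc to quit
        break

cap.release()
cv2.destroyAllWindows()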