I am trying to extract landmarks from a face and generate the AandB images for training a Generative Adversarial Network (GAN).
The code that produces the AandB images:
import face_alignment
import cv2
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from skimage import io
# Run the 3D face alignment on a test image, without CUDA.
fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._3D, device='cpu', flip_input=False)
input = io.imread('datasets/cloud_faces/trainB/00000566.AN.jpg')
preds = fa.get_landmarks(input)[-1]
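# preds now holds the landmarks of the last detected face: a (68, 3) array of
# x, y, z values, with x/y given in the input image's pixel coordinates.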
fig = plt.figure(figsize=plt.figaspect(.5))
# size = fig.get_size_inches()*fig.dpi # size in pixels
ax = fig.add_subplot(1, 2, 2)
ax.imshow(input)
ax.axis('off')
# Save just the portion _inside_ the second axis's boundaries
extent = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
fig.savefig('datasets/face_landmarks/face.png', bbox_inches=extent)
ax = fig.add_subplot(1, 2, 1, projection='3d')
# Scatter the landmarks, then connect them group by group following the
# 68-point layout: jaw, eyebrows, nose, eyes, and mouth.
surf = ax.scatter(preds[:, 0]*1.2, preds[:, 1], preds[:, 2], c="cyan", alpha=1.0, edgecolor='b')
ax.plot3D(preds[:17, 0]*1.2, preds[:17, 1], preds[:17, 2], color='blue')
ax.plot3D(preds[17:22, 0]*1.2, preds[17:22, 1], preds[17:22, 2], color='blue')
ax.plot3D(preds[22:27, 0]*1.2, preds[22:27, 1], preds[22:27, 2], color='blue')
ax.plot3D(preds[27:31, 0]*1.2, preds[27:31, 1], preds[27:31, 2], color='blue')
ax.plot3D(preds[31:36, 0]*1.2, preds[31:36, 1], preds[31:36, 2], color='blue')
ax.plot3D(preds[36:42, 0]*1.2, preds[36:42, 1], preds[36:42, 2], color='blue')
ax.plot3D(preds[42:48, 0]*1.2, preds[42:48, 1], preds[42:48, 2], color='blue')
ax.plot3D(preds[48:, 0]*1.2, preds[48:, 1], preds[48:, 2], color='blue')
ax.axis('off')
ax.view_init(elev=90., azim=90.)
ax.set_xlim(ax.get_xlim()[::-1])
plt.savefig('datasets/face_landmarks/face_and_keypoints.png')
# Save just the portion _inside_ the 3D axes' boundaries
extent2 = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
fig.savefig('datasets/face_landmarks/keypoints.png', bbox_inches=extent2)
plt.show()
# get processed images
img1 = cv2.imread('datasets/face_landmarks/keypoints.png')
img2 = cv2.imread('datasets/face_landmarks/face.png')
# concatenate images into one
vis = np.concatenate((img1, img2), axis=1)
# save
cv2.imwrite('datasets/face_landmarks/keypoints_and_face.png', vis)
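As an aside, np.concatenate along axis=1 requires both crops to have exactly the same height, and because the two saved crops come from different axes' bounding boxes they can differ by a pixel or two. A minimal sketch (reusing the file names from the script above) that resizes the face crop to match before stacking:

# Sketch: make sure both crops share the same height before horizontal concatenation.
import cv2
import numpy as np

img1 = cv2.imread('datasets/face_landmarks/keypoints.png')
img2 = cv2.imread('datasets/face_landmarks/face.png')
if img1.shape[0] != img2.shape[0]:
    # cv2.resize takes (width, height); match img2's height to img1's
    img2 = cv2.resize(img2, (img2.shape[1], img1.shape[0]))
vis = np.concatenate((img1, img2), axis=1)
cv2.imwrite('datasets/face_landmarks/keypoints_and_face.png', vis)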
The code produces the following image:

However, you will notice that the facial landmarks are offset and do not line up with the face.
How can I adjust the code above to fix this alignment?
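For reference, here is a minimal diagnostic sketch (my own addition, not part of the pipeline above): since face_alignment returns x/y in the image's own pixel coordinates, drawing the raw predictions on the same axes as imshow shows whether the predictions themselves line up with the face, or whether only the 3D plot's scaling and orientation are off:

# Sketch: overlay the predicted landmarks directly on the input image.
import face_alignment
import matplotlib.pyplot as plt
from skimage import io

fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._3D, device='cpu', flip_input=False)
img = io.imread('datasets/cloud_faces/trainB/00000566.AN.jpg')
pts = fa.get_landmarks(img)[-1]

plt.imshow(img)
plt.scatter(pts[:, 0], pts[:, 1], s=8, c='cyan', edgecolors='b')
plt.axis('off')
plt.show()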