c# – How to detect face attributes with Microsoft Cognitive Services by providing a list of Windows.Media.FaceAnalysis DetectedFace objects?
I can get a list of Windows.Media.FaceAnalysis DetectedFace objects from a live web cam. Now I want to pass these faces to the Microsoft Cognitive Services API to detect the faces and get their face attributes. How can I do that?
IList<DetectedFace> faces = null;

// Create a VideoFrame object specifying the pixel format we want our capture image to be (NV12 bitmap in this case).
// GetPreviewFrame will convert the native webcam frame into this format.
const BitmapPixelFormat InputPixelFormat = BitmapPixelFormat.Nv12;

using (VideoFrame previewFrame = new VideoFrame(InputPixelFormat, (int)this.videoProperties.Width, (int)this.videoProperties.Height))
{
    await this.mediaCapture.GetPreviewFrameAsync(previewFrame);

    // The returned VideoFrame should be in the supported NV12 format, but we need to verify this.
    if (FaceDetector.IsBitmapPixelFormatSupported(previewFrame.SoftwareBitmap.BitmapPixelFormat))
    {
        faces = await this.faceDetector.DetectFacesAsync(previewFrame.SoftwareBitmap);

        // Now pass these faces to the Cognitive Services API
        // faceClient.DetectAsync
    }
}
Solution:
The DetectedFace object contains the bounding box of the actual face. So you can use that knowledge to create an in-memory stream containing just the face and send that to the Face client.
private async Task DetectAsync()
{
    IList<DetectedFace> faces = null;
    const BitmapPixelFormat InputPixelFormat = BitmapPixelFormat.Nv12;

    using (VideoFrame destinationPreviewFrame = new VideoFrame(InputPixelFormat, 640, 480))
    {
        await this._mediaCapture.GetPreviewFrameAsync(destinationPreviewFrame);

        if (FaceDetector.IsBitmapPixelFormatSupported(InputPixelFormat))
        {
            faces = await this.faceDetector.DetectFacesAsync(destinationPreviewFrame.SoftwareBitmap);

            foreach (var face in faces)
            {
                // Convert the NV12 frame to RGBA16 so the bitmap encoder can consume it.
                using (SoftwareBitmap convertedBitmap = SoftwareBitmap.Convert(destinationPreviewFrame.SoftwareBitmap, BitmapPixelFormat.Rgba16))
                {
                    // Get the raw bytes of the detected face, cropped to its bounding box.
                    byte[] rawBytes = await GetBytesFromBitmap(convertedBitmap, BitmapEncoder.BmpEncoderId, face.FaceBox);

                    // Wrap the bitmap bytes in a stream and send it to the Face client.
                    using (Stream stream = rawBytes.AsBuffer().AsStream())
                    {
                        var faceAttributesToReturn = new List<FaceAttributeType>()
                        {
                            FaceAttributeType.Age,
                            FaceAttributeType.Emotion,
                            FaceAttributeType.Hair
                        };

                        Face[] detectedFaces = await this.faceClient.DetectAsync(stream, true, true, faceAttributesToReturn);
                        Debug.Assert(detectedFaces.Length > 0);
                    }
                }
            }
        }
    }
}
private async Task<byte[]> GetBytesFromBitmap(SoftwareBitmap soft, Guid encoderId, BitmapBounds bounds)
{
    byte[] array = null;

    using (var ms = new InMemoryRandomAccessStream())
    {
        BitmapEncoder encoder = await BitmapEncoder.CreateAsync(encoderId, ms);
        encoder.SetSoftwareBitmap(soft);

        // Crop the encoded image to the bounds of the detected face.
        encoder.BitmapTransform.Bounds = bounds;
        await encoder.FlushAsync();

        // Rewind the stream before reading it back; after FlushAsync the
        // position is at the end and reading would return nothing.
        ms.Seek(0);
        array = new byte[ms.Size];
        await ms.ReadAsync(array.AsBuffer(), (uint)ms.Size, InputStreamOptions.None);
    }
    return array;
}
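For reference, here is a minimal sketch of how the faceDetector and faceClient fields used above could be initialized. It assumes the Microsoft.ProjectOxford.Face client library (the one that exposes FaceServiceClient, Face[] and this DetectAsync overload); the class name, method name and subscription key below are placeholders, not part of the original answer.

using System.Threading.Tasks;
using Microsoft.ProjectOxford.Face;
using Windows.Media.FaceAnalysis;

public sealed partial class MainPage
{
    private FaceDetector faceDetector;
    private FaceServiceClient faceClient;

    // Call this once (for example from OnNavigatedTo) before running DetectAsync.
    private async Task InitializeFaceServicesAsync()
    {
        // Local, on-device detector that finds face bounding boxes in preview frames.
        this.faceDetector = await FaceDetector.CreateAsync();

        // Remote Face API client that returns the requested face attributes.
        // "your-subscription-key" is a placeholder for your own Cognitive Services key.
        this.faceClient = new FaceServiceClient("your-subscription-key");
    }
}

Note that this approach makes one DetectAsync call (one API transaction) per detected face. Sending the whole preview frame in a single DetectAsync call would also work, since the Face API itself returns a face rectangle for every face it finds; cropping first simply keeps the upload small and ties each result to a specific DetectedFace.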
Tags: c#, uwp, microsoft-cognitive, face-api, azure-cognitive-services. Source: https://codeday.me/bug/20190701/1348909.html