Using the Microsoft Face Recognition SDK
Official tutorial
Microsoft Azure bundles a number of AI-related components whose capabilities are exposed through web API requests; below are a few examples.
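All three examples reduce to the same REST call: an HTTP POST to the detect endpoint, authenticated with the Ocp-Apim-Subscription-Key header, whose JSON body names the image to analyze. Here is a minimal C# sketch of that shared pattern (the key is a placeholder, the region is the westcentralus endpoint used throughout this post, and the image URL is borrowed from the Node.js sample below):
// Minimal sketch of the REST pattern shared by the samples in this post.
// Assumptions: placeholder key, westcentralus region, C# 7.1+ (async Main).
using System;
using System.Net.Http;
using System.Text;
using System.Threading.Tasks;

class DetectSketch
{
    static async Task Main()
    {
        const string subscriptionKey = "******"; // placeholder: your Face API key
        const string uriBase =
            "https://westcentralus.api.cognitive.microsoft.com/face/v1.0/detect";

        using (var client = new HttpClient())
        {
            // Every Cognitive Services request authenticates with this header.
            client.DefaultRequestHeaders.Add("Ocp-Apim-Subscription-Key", subscriptionKey);

            // The request body is a JSON object naming the image to analyze.
            var body = new StringContent(
                "{\"url\":\"https://upload.wikimedia.org/wikipedia/commons/3/37/Dagestani_man_and_woman.jpg\"}",
                Encoding.UTF8, "application/json");

            HttpResponseMessage response =
                await client.PostAsync(uriBase + "?returnFaceId=true", body);
            Console.WriteLine(await response.Content.ReadAsStringAsync());
        }
    }
}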
jQuery (web page)
<!DOCTYPE html>
<html>
<head>
<title>Detect Faces Sample</title>
<!-- <script src="https://ajax.googleapis.com/ajax/libs/jquery/1.9.0/jquery.min.js"></script> -->
</head>
<body>
<h1>Detect Faces:</h1>
Enter the URL to an image that includes a face or faces, then click
the <strong>Analyze face</strong> button.<br><br>
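<!-- Note: this file input is not wired up in the script below; only the URL field is used. -->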
<input type="file" id="localFile" name="localFile"/>
Image to analyze: <input type="text" name="inputImage" id="inputImage"
value="http://article-fd.zol-img.com.cn/t_s640x2000/g5/M00/08/07/ChMkJltn_xyIQOz4AABFFxqLxD4AAqcFgF5b28AAEUv569.jpg" />
<button onclick="processImage()">Analyze face</button><br><br>
<div id="wrapper" style="width:1020px; display:table;">
<div id="jsonOutput" style="width:600px; display:table-cell;">
Response:<br><br>
<textarea id="responseTextArea" class="UIInput" style="width:580px; height:400px;"></textarea>
</div>
<div id="imageDiv" style="width:420px; display:table-cell;">
Source image:<br><br>
<img id="sourceImage" width="400" />
</div>
</div>
<script src="jquery-3.4.1.min.js"></script>
<script type="text/javascript">
function processImage() {
// Replace <Subscription Key> with your valid subscription key.
var subscriptionKey = "******";
var uriBase =
"https://westcentralus.api.cognitive.microsoft.com/face/v1.0/detect";
// Request parameters.
var params = {
"returnFaceId": "true",
"returnFaceLandmarks": "false",
"returnFaceAttributes": "age,gender,headPose,smile,facialHair,glasses,emotion," +
"hair,makeup,occlusion,accessories,blur,exposure,noise"
};
// Display the image.
var sourceImageUrl = document.getElementById("inputImage").value;
document.querySelector("#sourceImage").src = sourceImageUrl;
// Perform the REST API call.
$.ajax({
url: uriBase + "?" + $.param(params),
// Request headers.
beforeSend: function (xhrObj) {
xhrObj.setRequestHeader("Content-Type", "application/json");
xhrObj.setRequestHeader("Ocp-Apim-Subscription-Key", subscriptionKey);
},
type: "POST",
// Request body.
data: '{"url": ' + '"' + sourceImageUrl + '"}',
})
.done(function (data) {
// Show formatted JSON on webpage.
$("#responseTextArea").val(JSON.stringify(data, null, 2));
})
.fail(function (jqXHR, textStatus, errorThrown) {
// Display error message.
var errorString = (errorThrown === "") ?
"Error. " : errorThrown + " (" + jqXHR.status + "): ";
errorString += (jqXHR.responseText === "") ?
"" : (jQuery.parseJSON(jqXHR.responseText).message) ?
jQuery.parseJSON(jqXHR.responseText).message :
jQuery.parseJSON(jqXHR.responseText).error.message;
alert(errorString);
});
}
</script>
</body>
</html>
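For reference, a successful detect call returns a JSON array with one element per detected face. Abbreviated, and with placeholder values, the response has roughly this shape:
[
  {
    "faceId": "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
    "faceRectangle": { "top": 0, "left": 0, "width": 0, "height": 0 },
    "faceAttributes": { "age": 0, "gender": "male", "smile": 0, "emotion": { ... }, ... }
  }
]
The faceRectangle coordinates are what the WPF sample further down uses to draw boxes around the detected faces.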
Node.js
'use strict';
const request = require('request');
// Replace <Subscription Key> with your valid subscription key.
const subscriptionKey = '*******';
// You must use the same location in your REST call as you used to get your
// subscription keys. For example, if you got your subscription keys from
// westus, replace "westcentralus" in the URL below with "westus".
const uriBase = "https://westcentralus.api.cognitive.microsoft.com/face/v1.0/detect";
const imageUrl =
'https://upload.wikimedia.org/wikipedia/commons/3/37/Dagestani_man_and_woman.jpg';
// Request parameters.
const params = {
'returnFaceId': 'true',
'returnFaceLandmarks': 'false',
'returnFaceAttributes': 'age,gender,headPose,smile,facialHair,glasses,' +
'emotion,hair,makeup,occlusion,accessories,blur,exposure,noise'
};
const options = {
uri: uriBase,
qs: params,
body: '{"url": ' + '"' + imageUrl + '"}',
headers: {
'Content-Type': 'application/json',
'Ocp-Apim-Subscription-Key' : subscriptionKey
}
};
request.post(options, (error, response, body) => {
if (error) {
console.log('Error: ', error);
return;
}
let jsonResponse = JSON.stringify(JSON.parse(body), null, ' ');
console.log('JSON Response\n');
console.log(jsonResponse);
});
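To run this sample, install the dependency first (npm install request) and then execute the script with node. Note that the request package has since been deprecated on npm; the same call can be made with Node's built-in https module or a maintained client such as axios, but the code above matches the official quickstart of the time, so it is kept as-is.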
Test results
C# WPF
1. MainWindow.xaml.cs
// <snippet_using>
using Microsoft.Azure.CognitiveServices.Vision.Face;
using Microsoft.Azure.CognitiveServices.Vision.Face.Models;
using System;
using System.Collections.Generic;
using System.IO;
using System.Text;
using System.Threading.Tasks;
using System.Windows;
using System.Windows.Input;
using System.Windows.Media;
using System.Windows.Media.Imaging;
// </snippet_using>
namespace FaceTutorial
{
public partial class MainWindow : Window
{
// <snippet_mainwindow_fields>
// Add your Face subscription key to your environment variables.
private const string subscriptionKey = "******"; // or Environment.GetEnvironmentVariable("FACE_SUBSCRIPTION_KEY"). Never commit a real key to source control.
// Add your Face endpoint to your environment variables.
private const string faceEndpoint = "https://westcentralus.api.cognitive.microsoft.com"; // or Environment.GetEnvironmentVariable("FACE_ENDPOINT"). The SDK appends /face/v1.0 itself, so use the resource root here.
private readonly IFaceClient faceClient = new FaceClient(
new ApiKeyServiceClientCredentials(subscriptionKey),
new System.Net.Http.DelegatingHandler[] { });
// The list of detected faces.
private IList<DetectedFace> faceList;
// The list of descriptions for the detected faces.
private string[] faceDescriptions;
// The resize factor for the displayed image.
private double resizeFactor;
private const string defaultStatusBarText =
"Place the mouse pointer over a face to see the face description.";
// </snippet_mainwindow_fields>
// <snippet_mainwindow_constructor>
public MainWindow()
{
InitializeComponent();
if (Uri.IsWellFormedUriString(faceEndpoint, UriKind.Absolute))
{
faceClient.Endpoint = faceEndpoint;
}
else
{
MessageBox.Show(faceEndpoint,
"Invalid URI", MessageBoxButton.OK, MessageBoxImage.Error);
Environment.Exit(0);
}
}
// </snippet_mainwindow_constructor>
// <snippet_browsebuttonclick_start>
// Displays the image and calls UploadAndDetectFaces.
private async void BrowseButton_Click(object sender, RoutedEventArgs e)
{
// Get the image file to scan from the user.
var openDlg = new Microsoft.Win32.OpenFileDialog();
openDlg.Filter = "JPEG Image(*.jpg)|*.jpg";
bool? result = openDlg.ShowDialog(this);
// Return if canceled.
if (result != true)
{
return;
}
// Display the image file.
string filePath = openDlg.FileName;
Uri fileUri = new Uri(filePath);
BitmapImage bitmapSource = new BitmapImage();
bitmapSource.BeginInit();
bitmapSource.CacheOption = BitmapCacheOption.None;
bitmapSource.UriSource = fileUri;
bitmapSource.EndInit();
FacePhoto.Source = bitmapSource;
// </snippet_browsebuttonclick_start>
// <snippet_browsebuttonclick_mid>
// Detect any faces in the image.
Title = "Detecting...";
faceList = await UploadAndDetectFaces(filePath);
Title = String.Format(
"Detection Finished. {0} face(s) detected", faceList.Count);
if (faceList.Count > 0)
{
// Prepare to draw rectangles around the faces.
DrawingVisual visual = new DrawingVisual();
DrawingContext drawingContext = visual.RenderOpen();
drawingContext.DrawImage(bitmapSource,
new Rect(0, 0, bitmapSource.Width, bitmapSource.Height));
double dpi = bitmapSource.DpiX;
// Some images don't contain dpi info.
resizeFactor = (dpi == 0) ? 1 : 96 / dpi;
faceDescriptions = new String[faceList.Count];
for (int i = 0; i < faceList.Count; ++i)
{
DetectedFace face = faceList[i];
// Draw a rectangle on the face.
drawingContext.DrawRectangle(
Brushes.Transparent,
new Pen(Brushes.Red, 2),
new Rect(
face.FaceRectangle.Left * resizeFactor,
face.FaceRectangle.Top * resizeFactor,
face.FaceRectangle.Width * resizeFactor,
face.FaceRectangle.Height * resizeFactor
)
);
// Store the face description.
faceDescriptions[i] = FaceDescription(face);
}
drawingContext.Close();
// Display the image with the rectangle around the face.
RenderTargetBitmap faceWithRectBitmap = new RenderTargetBitmap(
(int)(bitmapSource.PixelWidth * resizeFactor),
(int)(bitmapSource.PixelHeight * resizeFactor),
96,
96,
PixelFormats.Pbgra32);
faceWithRectBitmap.Render(visual);
FacePhoto.Source = faceWithRectBitmap;
// Set the status bar text.
faceDescriptionStatusBar.Text = defaultStatusBarText;
}
// </snippet_browsebuttonclick_mid>
// <snippet_browsebuttonclick_end>
}
// </snippet_browsebuttonclick_end>
// <snippet_mousemove_start>
// Displays the face description when the mouse is over a face rectangle.
private void FacePhoto_MouseMove(object sender, MouseEventArgs e)
{
// </snippet_mousemove_start>
// <snippet_mousemove_mid>
// If the REST call has not completed, return.
if (faceList == null)
return;
// Find the mouse position relative to the image.
Point mouseXY = e.GetPosition(FacePhoto);
ImageSource imageSource = FacePhoto.Source;
BitmapSource bitmapSource = (BitmapSource)imageSource;
// Scale adjustment between the actual size and displayed size.
var scale = FacePhoto.ActualWidth / (bitmapSource.PixelWidth / resizeFactor);
// Check if this mouse position is over a face rectangle.
bool mouseOverFace = false;
for (int i = 0; i < faceList.Count; ++i)
{
FaceRectangle fr = faceList[i].FaceRectangle;
double left = fr.Left * scale;
double top = fr.Top * scale;
double width = fr.Width * scale;
double height = fr.Height * scale;
// Display the face description if the mouse is over this face rectangle.
if (mouseXY.X >= left && mouseXY.X <= left + width &&
mouseXY.Y >= top && mouseXY.Y <= top + height)
{
faceDescriptionStatusBar.Text = faceDescriptions[i];
mouseOverFace = true;
break;
}
}
// String to display when the mouse is not over a face rectangle.
if (!mouseOverFace) faceDescriptionStatusBar.Text = defaultStatusBarText;
// </snippet_mousemove_mid>
// <snippet_mousemove_end>
}
// </snippet_mousemove_end>
// <snippet_uploaddetect>
// Uploads the image file and calls DetectWithStreamAsync.
private async Task<IList<DetectedFace>> UploadAndDetectFaces(string imageFilePath)
{
// The list of Face attributes to return.
IList<FaceAttributeType> faceAttributes =
new FaceAttributeType[]
{
FaceAttributeType.Gender, FaceAttributeType.Age,
FaceAttributeType.Smile, FaceAttributeType.Emotion,
FaceAttributeType.Glasses, FaceAttributeType.Hair
};
// Call the Face API.
try
{
using (Stream imageFileStream = File.OpenRead(imageFilePath))
{
// The second argument specifies to return the faceId, while
// the third argument specifies not to return face landmarks.
IList<DetectedFace> faceList =
await faceClient.Face.DetectWithStreamAsync(
imageFileStream, true, false, faceAttributes);
return faceList;
}
}
// Catch and display Face API errors.
catch (APIErrorException f)
{
MessageBox.Show(f.Message);
return new List<DetectedFace>();
}
// Catch and display all other errors.
catch (Exception e)
{
MessageBox.Show(e.Message, "Error");
return new List<DetectedFace>();
}
}
// </snippet_uploaddetect>
// <snippet_facedesc>
// Creates a string out of the attributes describing the face.
private string FaceDescription(DetectedFace face)
{
StringBuilder sb = new StringBuilder();
sb.Append("Face: ");
// Add the gender, age, and smile.
sb.Append(face.FaceAttributes.Gender);
sb.Append(", ");
sb.Append(face.FaceAttributes.Age);
sb.Append(", ");
sb.Append(String.Format("smile {0:F1}%, ", face.FaceAttributes.Smile * 100));
// Add the emotions. Display all emotions over 10%.
sb.Append("Emotion: ");
Emotion emotionScores = face.FaceAttributes.Emotion;
if (emotionScores.Anger >= 0.1f) sb.Append(
String.Format("anger {0:F1}%, ", emotionScores.Anger * 100));
if (emotionScores.Contempt >= 0.1f) sb.Append(
String.Format("contempt {0:F1}%, ", emotionScores.Contempt * 100));
if (emotionScores.Disgust >= 0.1f) sb.Append(
String.Format("disgust {0:F1}%, ", emotionScores.Disgust * 100));
if (emotionScores.Fear >= 0.1f) sb.Append(
String.Format("fear {0:F1}%, ", emotionScores.Fear * 100));
if (emotionScores.Happiness >= 0.1f) sb.Append(
String.Format("happiness {0:F1}%, ", emotionScores.Happiness * 100));
if (emotionScores.Neutral >= 0.1f) sb.Append(
String.Format("neutral {0:F1}%, ", emotionScores.Neutral * 100));
if (emotionScores.Sadness >= 0.1f) sb.Append(
String.Format("sadness {0:F1}%, ", emotionScores.Sadness * 100));
if (emotionScores.Surprise >= 0.1f) sb.Append(
String.Format("surprise {0:F1}%, ", emotionScores.Surprise * 100));
// Add glasses.
sb.Append(face.FaceAttributes.Glasses);
sb.Append(", ");
// Add hair.
sb.Append("Hair: ");
// Display baldness confidence if over 1%.
if (face.FaceAttributes.Hair.Bald >= 0.01f)
sb.Append(String.Format("bald {0:F1}% ", face.FaceAttributes.Hair.Bald * 100));
// Display all hair color attributes over 10%.
IList<HairColor> hairColors = face.FaceAttributes.Hair.HairColor;
foreach (HairColor hairColor in hairColors)
{
if (hairColor.Confidence >= 0.1f)
{
sb.Append(hairColor.Color.ToString());
sb.Append(String.Format(" {0:F1}% ", hairColor.Confidence * 100));
}
}
// Return the built string.
return sb.ToString();
}
// </snippet_facedesc>
}
}
2. MainWindow.xaml
<!-- <snippet_xaml> -->
<Window x:Class="FaceTutorial.MainWindow"
xmlns="http://schemas.microsoft.com/winfx/2006/xaml/presentation"
xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml"
Title="MainWindow" Height="700" Width="960">
<Grid x:Name="BackPanel">
<Image x:Name="FacePhoto" Stretch="Uniform" Margin="0,0,0,50" MouseMove="FacePhoto_MouseMove" />
<DockPanel DockPanel.Dock="Bottom">
<Button x:Name="BrowseButton" Width="72" Height="20" VerticalAlignment="Bottom" HorizontalAlignment="Left"
Content="Browse..."
Click="BrowseButton_Click" />
<StatusBar VerticalAlignment="Bottom">
<StatusBarItem>
<TextBlock Name="faceDescriptionStatusBar" />
</StatusBarItem>
</StatusBar>
</DockPanel>
</Grid>
</Window>
<!-- </snippet_xaml> -->
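Before building, add the Face client library to the project: the types used above (FaceClient, DetectedFace, FaceAttributeType, and so on) come from the Microsoft.Azure.CognitiveServices.Vision.Face NuGet package, installable with dotnet add package Microsoft.Azure.CognitiveServices.Vision.Face or through the Visual Studio package manager.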
References
1. https://github.com/Azure/azure-sdk-for-net/tree/master/sdk