Update FaceQuickstart-single.cs #315

Open · wants to merge 4 commits into master
2 changes: 1 addition & 1 deletion dotnet/ComputerVision/ImageCaptioningQuickstart.cs
@@ -74,7 +74,7 @@ static void Main(string[] args)
AnalyzeImageUrl(client, ANALYZE_URL_IMAGE).Wait();
// </snippet_main_calls>

- // AnalyzeImageLocal(client, ANALYZE_LOCAL_IMAGE).Wait();
+ AnalyzeImageLocal(client, ANALYZE_LOCAL_IMAGE).Wait();

Console.WriteLine("----------------------------------------------------------");
Console.WriteLine();
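Reviewer note: this hunk re-enables the local-image analysis call unconditionally. If the sample image may be missing on some machines, a guarded call keeps the quickstart running end to end. A minimal sketch, not part of this PR, assuming `client` and `ANALYZE_LOCAL_IMAGE` are the client and local path already defined in this quickstart (requires `using System.IO;`):

// Only run the local analysis when the sample image is actually present.
if (File.Exists(ANALYZE_LOCAL_IMAGE))
{
    AnalyzeImageLocal(client, ANALYZE_LOCAL_IMAGE).Wait();
}
else
{
    Console.WriteLine($"Skipping local analysis; file not found: {ANALYZE_LOCAL_IMAGE}");
}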
16 changes: 15 additions & 1 deletion dotnet/Face/FaceQuickstart-single.cs
@@ -15,15 +15,18 @@ class Program
{
static string personGroupId = Guid.NewGuid().ToString();

// <snippet_image_url>
// URL path for the images.
const string IMAGE_BASE_URL = "https://csdx.blob.core.windows.net/resources/Face/Images/";
// </snippet_image_url>

// <snippet_creds>
// From your Face subscription in the Azure portal, get your subscription key and endpoint.
const string SUBSCRIPTION_KEY = "PASTE_YOUR_FACE_SUBSCRIPTION_KEY_HERE";
const string ENDPOINT = "PASTE_YOUR_FACE_SUBSCRIPTION_ENDPOINT_HERE";
// </snippet_creds>

- static void Main(string[] args)
+ static void Main(string[] args)
{
// Recognition model 4 was released in February 2021.
// It is recommended since its accuracy is improved
@@ -32,15 +35,18 @@ static void Main(string[] args)
// with models 1 and 2.
const string RECOGNITION_MODEL4 = RecognitionModel.Recognition04;

// <snippet_main_call>
// Authenticate.
IFaceClient client = Authenticate(ENDPOINT, SUBSCRIPTION_KEY);

// Identify - recognize one or more faces in a person group (a person group is created in this example).
IdentifyInPersonGroup(client, IMAGE_BASE_URL, RECOGNITION_MODEL4).Wait();
// </snippet_main_call>

Console.WriteLine("End of quickstart.");
}

// <snippet_auth>
/*
* AUTHENTICATE
* Uses subscription key and endpoint to create a client.
@@ -49,7 +55,9 @@ public static IFaceClient Authenticate(string endpoint, string key)
{
return new FaceClient(new ApiKeyServiceClientCredentials(key)) { Endpoint = endpoint };
}
// </snippet_auth>

// <snippet_detect_face_with_quality>
// Detect faces from an image URL for recognition. This is a helper method for other functions in this quickstart.
// Parameter `returnFaceId` of `DetectWithUrlAsync` must be `true` (the default) for recognition.
// Parameter `returnFaceAttributes` is set to include the QualityForRecognition attribute.
@@ -73,6 +81,7 @@ private static async Task<List<DetectedFace>> DetectFaceRecognize(IFaceClient faceClient, string url, string recognition_model)

return sufficientQualityFaces.ToList();
}
// </snippet_detect_face_with_quality>

/*
* IDENTIFY FACES
@@ -142,7 +151,9 @@ public static async Task IdentifyInPersonGroup(IFaceClient client, string url, string recognitionModel)
$"{url}{similarImage}", similarImage);
}
}
// </snippet_persongroup_create>

// <snippet_persongroup_train>
// Start to train the person group.
Console.WriteLine();
Console.WriteLine($"Train person group {personGroupId}.");
@@ -157,6 +168,7 @@ public static async Task IdentifyInPersonGroup(IFaceClient client, string url, string recognitionModel)
if (trainingStatus.Status == TrainingStatusType.Succeeded) { break; }
}
Console.WriteLine();
// </snippet_persongroup_train>

List<Guid> sourceFaceIds = new List<Guid>();
// Detect faces from source image url.
@@ -165,6 +177,7 @@ public static async Task IdentifyInPersonGroup(IFaceClient client, string url, string recognitionModel)
// Add detected faceId to sourceFaceIds.
foreach (var detectedFace in detectedFaces) { sourceFaceIds.Add(detectedFace.FaceId.Value); }

// <snippet_identify_face>
// Identify the faces in a person group.
var identifyResults = await client.Face.IdentifyAsync(sourceFaceIds, personGroupId);

@@ -178,6 +191,7 @@ public static async Task IdentifyInPersonGroup(IFaceClient client, string url, string recognitionModel)
Console.WriteLine($"Person '{person.Name}' is identified for the face in: {sourceImageFileName} - {identifyResult.FaceId}," +
$" confidence: {identifyResult.Candidates[0].Confidence}.");
}
// </snippet_identify_face>
Console.WriteLine();
}
}
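Reviewer note: the training loop in this file polls until the status reaches Succeeded, so a run whose training fails would spin forever. A minimal hardening sketch, not part of this PR; the 5-minute deadline is a hypothetical choice, and `TrainingStatusType.Failed` comes from the same Face models namespace the quickstart already imports:

// Poll training status, but bail out on failure or after a deadline.
var deadline = DateTime.UtcNow.AddMinutes(5); // hypothetical timeout
while (true)
{
    await Task.Delay(1000);
    var trainingStatus = await client.PersonGroup.GetTrainingStatusAsync(personGroupId);
    Console.WriteLine($"Training status: {trainingStatus.Status}.");
    if (trainingStatus.Status == TrainingStatusType.Succeeded) { break; }
    if (trainingStatus.Status == TrainingStatusType.Failed)
        throw new InvalidOperationException($"Training of person group {personGroupId} failed.");
    if (DateTime.UtcNow > deadline)
        throw new TimeoutException($"Training of person group {personGroupId} did not finish in time.");
}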
222 changes: 222 additions & 0 deletions dotnet/Face/IdentityVerification.cs
@@ -0,0 +1,222 @@
// <snippet_single>
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;

using Microsoft.Azure.CognitiveServices.Vision.Face;
using Microsoft.Azure.CognitiveServices.Vision.Face.Models;

namespace FaceQuickstart
{
class Program
{
static string personGroupId = Guid.NewGuid().ToString();

// <snippet_image_url>
// URL path for the images.
const string IMAGE_BASE_URL = "https://csdx.blob.core.windows.net/resources/Face/Images/";
// </snippet_image_url>

// <snippet_creds>
// From your Face subscription in the Azure portal, get your subscription key and endpoint.
const string SUBSCRIPTION_KEY = "PASTE YOUR KEY";
const string ENDPOINT = "PASTE YOUR ENDPOINT";
// </snippet_creds>

static void Main(string[] args)
{
// Recognition model 4 was released in February 2021.
// It is recommended since its accuracy is improved
// on faces wearing masks compared with model 3,
// and its overall accuracy is improved compared
// with models 1 and 2.
const string RECOGNITION_MODEL4 = RecognitionModel.Recognition04;

// <snippet_main_call>
// Authenticate.
IFaceClient client = Authenticate(ENDPOINT, SUBSCRIPTION_KEY);

// Identify - recognize one or more faces in a person group (a person group is created in this example).
IdentifyInPersonGroup(client, IMAGE_BASE_URL, RECOGNITION_MODEL4).Wait();
// </snippet_main_call>

Console.WriteLine("End of quickstart.");
}

// <snippet_auth>
/*
* AUTHENTICATE
* Uses subscription key and endpoint to create a client.
*/
public static IFaceClient Authenticate(string endpoint, string key)
{
return new FaceClient(new ApiKeyServiceClientCredentials(key)) { Endpoint = endpoint };
}
// </snippet_auth>

// <snippet_detect_face_with_quality>
// Detect faces from an image URL for recognition. This is a helper method for other functions in this quickstart.
// Parameter `returnFaceId` of `DetectWithUrlAsync` must be `true` (the default) for recognition.
// Parameter `returnFaceAttributes` is set to include the QualityForRecognition attribute,
// which requires the recognition model to be recognition_03 or recognition_04.
// Faces with insufficient quality for recognition are filtered out.
// The `faceId` field of each returned `DetectedFace` is used in Face - Find Similar, Face - Verify, and Face - Identify.
// It expires 24 hours after the detection call.
private static async Task<List<DetectedFace>> DetectFaceRecognize(IFaceClient faceClient, string url, string recognition_model)
{
// Detect faces from the image URL, using the recognition model passed in by the caller.
// Detection model 3 is used, and the QualityForRecognition attribute is requested.
IList<DetectedFace> detectedFaces = await faceClient.Face.DetectWithUrlAsync(url, recognitionModel: recognition_model, detectionModel: DetectionModel.Detection03, returnFaceAttributes: new List<FaceAttributeType> { FaceAttributeType.QualityForRecognition });
List<DetectedFace> sufficientQualityFaces = new List<DetectedFace>();
foreach (DetectedFace detectedFace in detectedFaces){
var faceQualityForRecognition = detectedFace.FaceAttributes.QualityForRecognition;
if (faceQualityForRecognition.HasValue && (faceQualityForRecognition.Value >= QualityForRecognition.Medium)){
sufficientQualityFaces.Add(detectedFace);
}
}
Console.WriteLine($"{detectedFaces.Count} face(s) with {sufficientQualityFaces.Count} having sufficient quality for recognition detected from image `{Path.GetFileName(url)}`");

return sufficientQualityFaces.ToList();
}
// </snippet_detect_face_with_quality>

/*
* IDENTIFY FACES
* To identify faces, you need to create and populate a person group.
* The Identify operation takes one or several face IDs from DetectedFace or PersistedFace and a PersonGroup and returns
* a list of Person objects that each face might belong to. Returned Person objects are wrapped as Candidate objects,
* which have a prediction confidence value.
*/
// <snippet_persongroup_files>
public static async Task IdentifyInPersonGroup(IFaceClient client, string url, string recognitionModel)
{
Console.WriteLine("========IDENTIFY FACES========");
Console.WriteLine();

// Create a dictionary for all your images, grouping similar ones under the same key.
Dictionary<string, string[]> personDictionary =
new Dictionary<string, string[]>
{ { "Family1-Dad", new[] { "Family1-Dad1.jpg", "Family1-Dad2.jpg" } },
{ "Family1-Mom", new[] { "Family1-Mom1.jpg", "Family1-Mom2.jpg" } },
{ "Family1-Son", new[] { "Family1-Son1.jpg", "Family1-Son2.jpg" } },
{ "Family1-Daughter", new[] { "Family1-Daughter1.jpg", "Family1-Daughter2.jpg" } },
{ "Family2-Lady", new[] { "Family2-Lady1.jpg", "Family2-Lady2.jpg" } },
{ "Family2-Man", new[] { "Family2-Man1.jpg", "Family2-Man2.jpg" } }
};
// A group photo that includes some of the persons you seek to identify from your dictionary.
string sourceImageFileName = "identification1.jpg";
// </snippet_persongroup_files>

// <snippet_persongroup_create>
// Create a person group.
Console.WriteLine($"Create a person group ({personGroupId}).");
await client.PersonGroup.CreateAsync(personGroupId, personGroupId, recognitionModel: recognitionModel);
// The similar faces will be grouped into a single person group person.
foreach (var groupedFace in personDictionary.Keys)
{
// Limit TPS
await Task.Delay(250);
Person person = await client.PersonGroupPerson.CreateAsync(personGroupId: personGroupId, name: groupedFace);
Console.WriteLine($"Create a person group person '{groupedFace}'.");

// Add face to the person group person.
foreach (var similarImage in personDictionary[groupedFace])
{
Console.WriteLine($"Check whether image is of sufficient quality for recognition");
IList<DetectedFace> detectedFaces1 = await client.Face.DetectWithUrlAsync($"{url}{similarImage}",
recognitionModel: recognitionModel,
detectionModel: DetectionModel.Detection03,
returnFaceAttributes: new List<FaceAttributeType> { FaceAttributeType.QualityForRecognition });
bool sufficientQuality = true;
foreach (var face1 in detectedFaces1)
{
var faceQualityForRecognition = face1.FaceAttributes.QualityForRecognition;
// Only "high" quality images are recommended for person enrollment
if (faceQualityForRecognition.HasValue && (faceQualityForRecognition.Value != QualityForRecognition.High)){
sufficientQuality = false;
break;
}
}

if (!sufficientQuality){
continue;
}


Console.WriteLine($"Add face to the person group person({groupedFace}) from image `{similarImage}`");
PersistedFace face = await client.PersonGroupPerson.AddFaceFromUrlAsync(personGroupId, person.PersonId,
$"{url}{similarImage}", similarImage);
}
}
// </snippet_persongroup_create>

// <snippet_persongroup_train>
// Start to train the person group.
Console.WriteLine();
Console.WriteLine($"Train person group {personGroupId}.");
await client.PersonGroup.TrainAsync(personGroupId);

// Wait until the training is completed.
while (true)
{
await Task.Delay(1000);
var trainingStatus = await client.PersonGroup.GetTrainingStatusAsync(personGroupId);
Console.WriteLine($"Training status: {trainingStatus.Status}.");
if (trainingStatus.Status == TrainingStatusType.Succeeded) { break; }
}
Console.WriteLine();
// </snippet_persongroup_train>

List<Guid> sourceFaceIds = new List<Guid>();
// Detect faces from source image url.
List<DetectedFace> detectedFaces = await DetectFaceRecognize(client, $"{url}{sourceImageFileName}", recognitionModel);

// Add detected faceId to sourceFaceIds.
foreach (var detectedFace in detectedFaces) { sourceFaceIds.Add(detectedFace.FaceId.Value); }

// <snippet_identify_face>
// Identify the faces in a person group.
var identifyResults = await client.Face.IdentifyAsync(sourceFaceIds, personGroupId);

foreach (var identifyResult in identifyResults)
{
if (identifyResult.Candidates.Count == 0) {
Console.WriteLine($"No person is identified for the face in: {sourceImageFileName} - {identifyResult.FaceId}.");
continue;
}
Person person = await client.PersonGroupPerson.GetAsync(personGroupId, identifyResult.Candidates[0].PersonId);
Console.WriteLine($"Person '{person.Name}' is identified for the face in: {sourceImageFileName} - {identifyResult.FaceId}," +
$" confidence: {identifyResult.Candidates[0].Confidence}.");
}
// </snippet_identify_face>
Console.WriteLine();

// <snippet_verify_face>
Console.WriteLine("verify");
IList<Person> persons = await client.PersonGroupPerson.ListAsync(personGroupId);
Guid faceId = detectedFaces[0].FaceId.Value;
foreach (var person in persons)
{
Console.WriteLine($"faceID: {faceId}");
try
{
VerifyResult result = await client.Face.VerifyFaceToPersonAsync(faceId, person.PersonId, personGroupId);
if (result.IsIdentical)
{
Console.WriteLine($"verify face {faceId} is person {person.Name}");
}
}
catch (APIErrorException e)
{
Console.WriteLine(e.Response);
}

}
// </snippet_verify_face>
}
}
}
// </snippet_single>
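Reviewer note: the verify loop above only prints when `IsIdentical` is true, but `VerifyResult` also exposes a `Confidence` score that helps when tuning a match threshold. A minimal sketch, not part of this PR, assuming the same `client`, `faceId`, `person`, and `personGroupId` as in the loop:

// Log the verdict together with the confidence score for every comparison.
VerifyResult result = await client.Face.VerifyFaceToPersonAsync(faceId, person.PersonId, personGroupId);
Console.WriteLine($"Face {faceId} vs person '{person.Name}': identical={result.IsIdentical}, confidence={result.Confidence}");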
4 changes: 2 additions & 2 deletions python/ComputerVision/ImageCaptioningQuickstart.py
@@ -83,10 +83,10 @@

# Get the captions (descriptions) from the response, with confidence level
print("Description of local image: ")
- if ( not description_result.description):
+ if ( not description_result.captions):
print("No description detected.")
else:
- for caption in description_result.description.captions:
+ for caption in description_result.captions:
print("'{}' with confidence {:.2f}%".format(caption.text, caption.confidence * 100))
print()
'''