Skip to content

Commit a0da750

Browse files
stephenplusplus authored and callmehiphop committed
vision: return errors (#1334)
* vision: return errors * tests * test coverage fixer * tests * docs * add error handling example * elaborate error handling
1 parent 4184a1a commit a0da750

5 files changed

Lines changed: 568 additions & 216 deletions

File tree

lib/common/grpc-service.js

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -38,7 +38,7 @@ var Service = require('./service.js');
3838
* @const {object} - A map of protobuf codes to HTTP status codes.
3939
* @private
4040
*/
41-
var HTTP_ERROR_CODE_MAP = {
41+
var GRPC_ERROR_CODE_TO_HTTP = {
4242
0: {
4343
code: 200,
4444
message: 'OK'
@@ -268,8 +268,8 @@ GrpcService.prototype.request = function(protoOpts, reqOpts, callback) {
268268

269269
service[protoOpts.method](reqOpts, grpcOpts, function(err, resp) {
270270
if (err) {
271-
if (HTTP_ERROR_CODE_MAP[err.code]) {
272-
respError = extend(err, HTTP_ERROR_CODE_MAP[err.code]);
271+
if (GRPC_ERROR_CODE_TO_HTTP[err.code]) {
272+
respError = extend(err, GRPC_ERROR_CODE_TO_HTTP[err.code]);
273273
onResponse(null, respError);
274274
return;
275275
}
@@ -443,3 +443,4 @@ GrpcService.prototype.getGrpcCredentials_ = function(callback) {
443443
};
444444

445445
module.exports = GrpcService;
446+
module.exports.GRPC_ERROR_CODE_TO_HTTP = GRPC_ERROR_CODE_TO_HTTP;

lib/vision/index.js

Lines changed: 207 additions & 76 deletions
Original file line numberDiff line numberDiff line change
@@ -37,6 +37,12 @@ var request = require('request');
3737
*/
3838
var File = require('../storage/file.js');
3939

40+
/**
41+
* @type {module:common/grpc-service}
42+
* @private
43+
*/
44+
var GrpcService = require('../common/grpc-service.js');
45+
4046
/**
4147
* @type {module:common/service}
4248
* @private
@@ -206,6 +212,10 @@ Vision.prototype.annotate = function(requests, callback) {
206212
* `config.types`). Additionally, if multiple images were provided, you will
207213
* receive an array of detection objects, each representing an image. See
208214
* the examples below for more information.
215+
* @param {object[]} callback.detections.errors - It's possible for part of your
216+
* request to be completed successfully, while a single feature request was
217+
* not successful. Each returned detection will have an `errors` array,
218+
* including any of these errors which may have occurred.
209219
* @param {object} callback.apiResponse - Raw API response.
210220
*
211221
* @example
@@ -259,6 +269,18 @@ Vision.prototype.annotate = function(requests, callback) {
259269
* // }
260270
* // ]
261271
* });
272+
*
273+
* //-
274+
* // It's possible for part of your request to be completed successfully, while
275+
* // a single feature request was not successful. Each returned detection will
276+
* // have an `errors` array, including any of these errors which may have
277+
* // occurred.
278+
* //-
279+
* vision.detect('malformed-image.jpg', types, function(err, detections) {
280+
* if (detections.faces.errors.length > 0) {
281+
* // Errors occurred while trying to use this image for a face annotation.
282+
* }
283+
* });
262284
*/
263285
Vision.prototype.detect = function(images, options, callback) {
264286
var self = this;
@@ -292,6 +314,37 @@ Vision.prototype.detect = function(images, options, callback) {
292314
text: 'TEXT_DETECTION'
293315
};
294316

317+
var typeShortNameToRespName = {
318+
face: 'faceAnnotations',
319+
faces: 'faceAnnotations',
320+
321+
label: 'labelAnnotations',
322+
labels: 'labelAnnotations',
323+
324+
landmark: 'landmarkAnnotations',
325+
landmarks: 'landmarkAnnotations',
326+
327+
logo: 'logoAnnotations',
328+
logos: 'logoAnnotations',
329+
330+
properties: 'imagePropertiesAnnotation',
331+
332+
safeSearch: 'safeSearchAnnotation',
333+
334+
text: 'textAnnotations'
335+
};
336+
337+
var typeRespNameToShortName = {
338+
errors: 'errors',
339+
faceAnnotations: 'faces',
340+
imagePropertiesAnnotation: 'properties',
341+
labelAnnotations: 'labels',
342+
landmarkAnnotations: 'landmarks',
343+
logoAnnotations: 'logos',
344+
safeSearchAnnotation: 'safeSearch',
345+
textAnnotations: 'text'
346+
};
347+
295348
Vision.findImages_(images, function(err, images) {
296349
if (err) {
297350
callback(err);
@@ -333,13 +386,120 @@ Vision.prototype.detect = function(images, options, callback) {
333386
return;
334387
}
335388

336-
function mergeArrayOfObjects(arr) {
337-
return extend.apply(null, arr);
389+
var originalResp = extend(true, {}, resp);
390+
391+
var detections = images
392+
.map(groupDetectionsByImage)
393+
.map(assignTypeToEmptyAnnotations)
394+
.map(combineErrors)
395+
.map(flattenAnnotations)
396+
.map(decorateAnnotations);
397+
398+
// If only a single image was given, expose it from the array.
399+
callback(null, isSingleImage ? detections[0] : detections, originalResp);
400+
401+
function groupDetectionsByImage() {
402+
// detections = [
403+
// // Image one:
404+
// [
405+
// {
406+
// faceAnnotations: {},
407+
// labelAnnotations: {},
408+
// ...
409+
// }
410+
// ],
411+
//
412+
// // Image two:
413+
// [
414+
// {
415+
// faceAnnotations: {},
416+
// labelAnnotations: {},
417+
// ...
418+
// }
419+
// ]
420+
// ]
421+
return annotations.splice(0, types.length);
422+
}
423+
424+
function assignTypeToEmptyAnnotations(annotations) {
425+
// Before:
426+
// [
427+
// {}, // What annotation type was attempted?
428+
// { labelAnnotations: {...} }
429+
// ]
430+
//
431+
// After:
432+
// [
433+
// { faceAnnotations: {} },
434+
// { labelAnnotations: {...} }
435+
// ]
436+
return annotations.map(function(annotation, index) {
437+
var detectionType = types[index];
438+
var typeName = typeShortNameToRespName[detectionType];
439+
440+
if (is.empty(annotation) || annotation.error) {
441+
var isPlural = typeName.charAt(typeName.length - 1) === 's';
442+
annotation[typeName] = isPlural ? [] : {};
443+
}
444+
445+
return annotation;
446+
});
447+
}
448+
449+
function combineErrors(annotations) {
450+
// Before:
451+
// [
452+
// {
453+
// faceAnnotations: [],
454+
// error: {...}
455+
// },
456+
// {
457+
// imagePropertiesAnnotation: {},
458+
// error: {...}
459+
// }
460+
// ]
461+
462+
// After:
463+
// [
464+
// faceAnnotations: [],
465+
// imagePropertiesAnnotation: {},
466+
// errors: [
467+
// {...},
468+
// {...}
469+
// ]
470+
// ]
471+
var errors = [];
472+
473+
annotations.forEach(function(annotation) {
474+
var annotationKey = Object.keys(annotation)[0];
475+
476+
if (annotationKey === 'error') {
477+
errors.push(annotation.error);
478+
delete annotation.error;
479+
}
480+
481+
return annotation;
482+
});
483+
484+
annotations.push({
485+
errors: errors
486+
});
487+
488+
return annotations;
489+
}
490+
491+
function flattenAnnotations(annotations) {
492+
return extend.apply(null, annotations);
338493
}
339494

340495
function formatAnnotationBuilder(type) {
341496
return function(annotation) {
497+
if (is.empty(annotation)) {
498+
return annotation;
499+
}
500+
342501
var formatMethodMap = {
502+
errors: Vision.formatError_,
343503
faceAnnotations: Vision.formatFaceAnnotation_,
344504
imagePropertiesAnnotation: Vision.formatImagePropertiesAnnotation_,
345505
labelAnnotations: Vision.formatEntityAnnotation_,
@@ -353,85 +513,41 @@ Vision.prototype.detect = function(images, options, callback) {
353513
};
354514
}
355515

356-
var originalResp = extend(true, {}, resp);
357-
358-
var detections = images
359-
.map(function() {
360-
// Group detections by image...
361-
//
362-
// detections = [
363-
// // Image one:
364-
// [
365-
// {
366-
// faceAnnotations: {},
367-
// labelAnnotations: {},
368-
// ...
369-
// }
370-
// ],
371-
//
372-
// // Image two:
373-
// [
374-
// {
375-
// faceAnnotations: {},
376-
// labelAnnotations: {},
377-
// ...
378-
// }
379-
// ]
380-
// ]
381-
return annotations.splice(0, types.length);
382-
})
383-
.map(mergeArrayOfObjects)
384-
.map(function(annotations) {
385-
if (Object.keys(annotations).length === 0) {
386-
// No annotations found, represent as an empty result set.
387-
return [];
388-
}
389-
390-
for (var annotationType in annotations) {
391-
if (annotations.hasOwnProperty(annotationType)) {
392-
var annotationGroup = arrify(annotations[annotationType]);
393-
394-
var formattedAnnotationGroup = annotationGroup
395-
.map(formatAnnotationBuilder(annotationType));
396-
397-
// An annotation can be singular, e.g. SafeSearch. It is either
398-
// violent or not. Unlike face detection, where there can be
399-
// multiple results.
400-
//
401-
// Be sure the original type (object or array) is preserved and
402-
// not wrapped in an array if it wasn't originally.
403-
if (!is.array(annotations[annotationType])) {
404-
formattedAnnotationGroup = formattedAnnotationGroup[0];
405-
}
406-
407-
var typeFullNameToShortName = {
408-
faceAnnotations: 'faces',
409-
imagePropertiesAnnotation: 'properties',
410-
labelAnnotations: 'labels',
411-
landmarkAnnotations: 'landmarks',
412-
logoAnnotations: 'logos',
413-
safeSearchAnnotation: 'safeSearch',
414-
textAnnotations: 'text'
415-
};
416-
417-
delete annotations[annotationType];
418-
var typeShortName = typeFullNameToShortName[annotationType];
419-
annotations[typeShortName] = formattedAnnotationGroup;
516+
function decorateAnnotations(annotations) {
517+
for (var annotationType in annotations) {
518+
if (annotations.hasOwnProperty(annotationType)) {
519+
var annotationGroup = arrify(annotations[annotationType]);
520+
521+
var formattedAnnotationGroup = annotationGroup
522+
.map(formatAnnotationBuilder(annotationType));
523+
524+
// An annotation can be singular, e.g. SafeSearch. It is either
525+
// violent or not. Unlike face detection, where there can be
526+
// multiple results.
527+
//
528+
// Be sure the original type (object or array) is preserved and
529+
// not wrapped in an array if it wasn't originally.
530+
if (!is.array(annotations[annotationType])) {
531+
formattedAnnotationGroup = formattedAnnotationGroup[0];
420532
}
421-
}
422533

423-
if (types.length === 1) {
424-
// Only a single detection type was asked for, so no need to box in
425-
// the results. Make them accessible without using a key.
426-
var key = Object.keys(annotations)[0];
427-
annotations = annotations[key];
534+
delete annotations[annotationType];
535+
var typeShortName = typeRespNameToShortName[annotationType];
536+
annotations[typeShortName] = formattedAnnotationGroup;
428537
}
538+
}
429539

430-
return annotations;
431-
});
540+
if (types.length === 1) {
541+
// Only a single detection type was asked for, so no need to box in
542+
// the results. Make them accessible without using a key.
543+
var key = Object.keys(annotations)[0];
544+
var errors = annotations.errors;
545+
annotations = annotations[key];
546+
annotations.errors = errors;
547+
}
432548

433-
// If only a single image was given, expose it from the array.
434-
callback(null, isSingleImage ? detections[0] : detections, originalResp);
549+
return annotations;
550+
}
435551
});
436552
});
437553
};
@@ -1271,6 +1387,21 @@ Vision.formatEntityAnnotation_ = function(entityAnnotation, options) {
12711387
return formattedEntityAnnotation;
12721388
};
12731389

1390+
/**
1391+
* Format a raw error from the API.
1392+
*
1393+
* @private
1394+
*/
1395+
Vision.formatError_ = function(err) {
1396+
var httpError = GrpcService.GRPC_ERROR_CODE_TO_HTTP[err.code];
1397+
1398+
if (httpError) {
1399+
err.code = httpError.code;
1400+
}
1401+
1402+
return err;
1403+
};
1404+
12741405
/**
12751406
* Format a raw face annotation response from the API.
12761407
*

0 commit comments

Comments
 (0)