PersonDatasetAssembler: add the option to match images that do NOT contain the specified person
@@ -48,10 +48,11 @@ def extract_video_images(video: cv2.VideoCapture, interval: int = 0):
     ret = True
     frame_counter = 0
     while ret:
+        video.set(cv2.CAP_PROP_POS_FRAMES, frame_counter)
         ret, frame = video.read()
-        if ret and frame_counter % interval == 0:
+        if ret:
             yield frame
-        frame_counter += 1
+        frame_counter += interval
 
 
 def contains_face_match(detector: cv2.FaceDetectorYN, recognizer: cv2.FaceRecognizerSF, image: numpy.ndarray, referance_features: list(), thresh: float) -> bool:
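The hunk above replaces the read-every-frame-and-skip-by-modulo loop with a direct seek to each sampled frame, advancing frame_counter by interval each pass. Below is a minimal, self-contained sketch of the updated generator and one way it might be driven; the video path "sample.mp4" and the interval of 30 are placeholders, not values from the repository. Note that with the default interval of 0 the seek-based loop would keep re-reading frame 0, so callers presumably pass a positive interval.

# Sketch only: "sample.mp4" and interval=30 are placeholder inputs.
import cv2

def extract_video_images(video: cv2.VideoCapture, interval: int = 0):
    ret = True
    frame_counter = 0
    while ret:
        # Seek straight to the next sampled frame instead of decoding every frame.
        video.set(cv2.CAP_PROP_POS_FRAMES, frame_counter)
        ret, frame = video.read()
        if ret:
            yield frame
        frame_counter += interval

if __name__ == "__main__":
    capture = cv2.VideoCapture("sample.mp4")
    for i, frame in enumerate(extract_video_images(capture, interval=30)):
        print(f"sampled frame {i}: {frame.shape}")
    capture.release()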
@@ -88,6 +89,9 @@ def process_referance(detector: cv2.FaceDetectorYN, recognizer: cv2.FaceRecogniz
     for image in images:
         detector.setInputSize([image.shape[1], image.shape[0]])
         faces = detector.detect(image)[1]
+        if faces is None:
+            print("unable to find face in referance image")
+            exit(1)
         image = recognizer.alignCrop(image, faces[0])
         features = recognizer.feature(image)
         out.append(features)
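This hunk guards the reference-processing loop against images where the detector finds no face, which previously would have crashed alignCrop with a None index. The following is a hedged, standalone sketch of the detect, alignCrop, feature flow the guard protects, using the stock OpenCV FaceDetectorYN / FaceRecognizerSF interfaces; the two model file names and "reference.png" are placeholders, not paths taken from the repository.

# Sketch only: model paths and the reference image path are placeholders.
import sys
import cv2

detector = cv2.FaceDetectorYN.create("face_detection_yunet.onnx", "", (320, 320))
recognizer = cv2.FaceRecognizerSF.create("face_recognition_sface.onnx", "")

image = cv2.imread("reference.png")
detector.setInputSize([image.shape[1], image.shape[0]])
faces = detector.detect(image)[1]   # None when no face was found in the image
if faces is None:
    print("unable to find face in referance image")
    sys.exit(1)
aligned = recognizer.alignCrop(image, faces[0])   # crop and align the first detection
features = recognizer.feature(aligned)            # embedding used for matching
print(features.shape)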
@@ -103,6 +107,7 @@ if __name__ == "__main__":
     parser.add_argument('--match_model', '-m', required=True, help="Path to the onnx recognition model to be used")
     parser.add_argument('--detect_model', '-d', required=True, help="Path to the onnx detection model to be used")
     parser.add_argument('--threshold', '-t', default=0.362, type=float, help="match threshold to use")
+    parser.add_argument('--invert', '-n', action='store_true', help="output files that DONT match")
     args = parser.parse_args()
 
     recognizer = cv2.FaceRecognizerSF.create(model=args.match_model, config="", backend_id=cv2.dnn.DNN_BACKEND_DEFAULT , target_id=cv2.dnn.DNN_TARGET_CPU)
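The new --invert / -n switch is a plain store_true flag, so args.invert defaults to False and the existing behaviour is unchanged unless the flag is passed. A small standalone sketch of just that flag's behaviour; the parser here carries only the arguments visible in this hunk, not the script's full option set.

# Sketch only: a reduced parser with just the options shown in this hunk.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--threshold', '-t', default=0.362, type=float, help="match threshold to use")
parser.add_argument('--invert', '-n', action='store_true', help="output files that DONT match")

args = parser.parse_args(['--invert'])
print(args.invert)   # True: keep frames that do NOT match the reference person
args = parser.parse_args([])
print(args.invert)   # False: default behaviour, keep matching frames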
@@ -140,7 +145,7 @@ if __name__ == "__main__":
         else:
             resized = image
         score, match = contains_face_match(detector, recognizer, resized, referance_features, args.threshold)
-        if match:
+        if match and not args.invert or not match and args.invert:
             filename = f"{counter:04}.png"
             cv2.imwrite(os.path.join(args.out, filename), image)
             counter += 1
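The rewritten condition is an exclusive-or of match and args.invert: a frame is written out when exactly one of the two is true. The short check below confirms that reading and shows the equivalent, more compact form match != args.invert; this is an observation about the expression, not a change made by the commit.

# Truth-table check of the new condition against the compact XOR form.
for match in (True, False):
    for invert in (True, False):
        original = match and not invert or not match and invert
        compact = match != invert
        assert original == compact
        print(match, invert, "-> write file" if compact else "-> skip")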