Mirror of https://github.com/iperov/DeepFaceLab.git (synced 2024-03-22 13:10:55 +08:00)

commit 5fe5fa131c
parent 0251eb3490

    SampleProcessor.py : refactoring and gen mask struct
facelib/LandmarksProcessor.py

@@ -609,7 +609,13 @@ def mirror_landmarks (landmarks, val):
     result[:,0] = val - result[:,0] - 1
     return result
 
-def draw_landmarks (image, image_landmarks, color=(0,255,0), transparent_mask=False, ie_polys=None):
+def get_face_struct_mask (image_shape, image_landmarks, eyebrows_expand_mod=1.0, ie_polys=None, color=(1,) ):
+    mask = np.zeros(image_shape[0:2]+( len(color),),dtype=np.float32)
+    lmrks = expand_eyebrows(image_landmarks, eyebrows_expand_mod)
+    draw_landmarks (mask, image_landmarks, color=color, draw_circles=False, thickness=2, ie_polys=ie_polys)
+    return mask
+
+def draw_landmarks (image, image_landmarks, color=(0,255,0), draw_circles=True, thickness=1, transparent_mask=False, ie_polys=None):
     if len(image_landmarks) != 68:
         raise Exception('get_image_eye_mask works only with 68 landmarks')
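For orientation, the new helper can be exercised on its own. A minimal sketch, assuming this repo's package layout; the zeroed landmarks are a placeholder, not meaningful input:

import numpy as np
from facelib import LandmarksProcessor

# Hypothetical inputs: a 256x256 aligned face and its 68 (x, y) landmarks.
image_shape = (256, 256, 3)
landmarks = np.zeros((68, 2), dtype=np.float32)  # placeholder; real values come from the aligner

# Returns a float32 (256, 256, 1) map: 1.0 along the drawn structure lines, 0.0 elsewhere.
struct_mask = LandmarksProcessor.get_face_struct_mask(image_shape, landmarks)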
@@ -625,16 +631,18 @@ def draw_landmarks (image, image_landmarks, color=(0,255,0), transparent_mask=False, ie_polys=None):
 
     # open shapes
     cv2.polylines(image, tuple(np.array([v]) for v in ( right_eyebrow, jaw, left_eyebrow, np.concatenate((nose, [nose[-6]])) )),
-                  False, color, lineType=cv2.LINE_AA)
+                  False, color, thickness=thickness, lineType=cv2.LINE_AA)
     # closed shapes
     cv2.polylines(image, tuple(np.array([v]) for v in (right_eye, left_eye, mouth)),
-                  True, color, lineType=cv2.LINE_AA)
-    # the rest of the circles
-    for x, y in np.concatenate((right_eyebrow, left_eyebrow, mouth, right_eye, left_eye, nose), axis=0):
-        cv2.circle(image, (x, y), 1, color, 1, lineType=cv2.LINE_AA)
-    # jaw big circles
-    for x, y in jaw:
-        cv2.circle(image, (x, y), 2, color, lineType=cv2.LINE_AA)
+                  True, color, thickness=thickness, lineType=cv2.LINE_AA)
+
+    if draw_circles:
+        # the rest of the circles
+        for x, y in np.concatenate((right_eyebrow, left_eyebrow, mouth, right_eye, left_eye, nose), axis=0):
+            cv2.circle(image, (x, y), 1, color, 1, lineType=cv2.LINE_AA)
+        # jaw big circles
+        for x, y in jaw:
+            cv2.circle(image, (x, y), 2, color, lineType=cv2.LINE_AA)
 
     if transparent_mask:
         mask = get_image_hull_mask (image.shape, image_landmarks, ie_polys=ie_polys)
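The new draw_circles flag is what lets the same routine serve both visualization and mask generation. A short sketch of the two call styles (img, mask, and lmrks are assumed to be in scope):

# Debug preview: thin green outlines plus the per-point circles (previous behavior).
draw_landmarks(img, lmrks, color=(0, 255, 0), draw_circles=True, thickness=1)

# Mask rendering, as get_face_struct_mask uses it: 2px polylines only, no circles.
draw_landmarks(mask, lmrks, color=(1,), draw_circles=False, thickness=2)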
models/Model_Quick96/Model.py

@@ -372,14 +372,14 @@ class QModel(ModelBase):
                         sample_process_options=SampleProcessor.Options(random_flip=True if self.pretrain else False),
                         output_sample_types = [ {'types' : (t.IMG_WARPED_TRANSFORMED, face_type, t.MODE_BGR), 'data_format':nn.data_format, 'resolution':resolution, },
                                                 {'types' : (t.IMG_TRANSFORMED, face_type, t.MODE_BGR), 'data_format':nn.data_format, 'resolution': resolution, },
-                                                {'types' : (t.IMG_TRANSFORMED, face_type, t.MODE_M), 'data_format':nn.data_format, 'resolution': resolution } ],
+                                                {'types' : (t.IMG_TRANSFORMED, face_type, t.MODE_FACE_MASK_HULL), 'data_format':nn.data_format, 'resolution': resolution } ],
                         generators_count=src_generators_count ),
 
                     SampleGeneratorFace(training_data_dst_path, debug=self.is_debug(), batch_size=self.get_batch_size(),
                         sample_process_options=SampleProcessor.Options(random_flip=True if self.pretrain else False),
                         output_sample_types = [ {'types' : (t.IMG_WARPED_TRANSFORMED, face_type, t.MODE_BGR), 'data_format':nn.data_format, 'resolution':resolution},
                                                 {'types' : (t.IMG_TRANSFORMED, face_type, t.MODE_BGR), 'data_format':nn.data_format, 'resolution': resolution},
-                                                {'types' : (t.IMG_TRANSFORMED, face_type, t.MODE_M), 'data_format':nn.data_format, 'resolution': resolution} ],
+                                                {'types' : (t.IMG_TRANSFORMED, face_type, t.MODE_FACE_MASK_HULL), 'data_format':nn.data_format, 'resolution': resolution} ],
                         generators_count=dst_generators_count )
                     ])
models/Model_SAEHD/Model.py

@@ -725,14 +725,14 @@ class SAEHDModel(ModelBase):
                         sample_process_options=SampleProcessor.Options(random_flip=self.random_flip),
                         output_sample_types = [ {'types' : (t_img_warped, face_type, t.MODE_BGR), 'data_format':nn.data_format, 'resolution': resolution, 'ct_mode': self.options['ct_mode'] },
                                                 {'types' : (t.IMG_TRANSFORMED, face_type, t.MODE_BGR), 'data_format':nn.data_format, 'resolution': resolution, 'ct_mode': self.options['ct_mode'] },
-                                                {'types' : (t.IMG_TRANSFORMED, face_type, t.MODE_M), 'data_format':nn.data_format, 'resolution': resolution } ],
+                                                {'types' : (t.IMG_TRANSFORMED, face_type, t.MODE_FACE_MASK_HULL), 'data_format':nn.data_format, 'resolution': resolution } ],
                         generators_count=src_generators_count ),
 
                     SampleGeneratorFace(training_data_dst_path, debug=self.is_debug(), batch_size=self.get_batch_size(),
                         sample_process_options=SampleProcessor.Options(random_flip=self.random_flip),
                         output_sample_types = [ {'types' : (t_img_warped, face_type, t.MODE_BGR), 'data_format':nn.data_format, 'resolution': resolution},
                                                 {'types' : (t.IMG_TRANSFORMED, face_type, t.MODE_BGR), 'data_format':nn.data_format, 'resolution': resolution},
-                                                {'types' : (t.IMG_TRANSFORMED, face_type, t.MODE_M), 'data_format':nn.data_format, 'resolution': resolution} ],
+                                                {'types' : (t.IMG_TRANSFORMED, face_type, t.MODE_FACE_MASK_HULL), 'data_format':nn.data_format, 'resolution': resolution} ],
                         generators_count=dst_generators_count )
                     ])
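Both trainers make the same substitution: the third sample type, which used to request the generic mask channel MODE_M, now names the hull mask explicitly. A condensed sketch of one src-side request, with a placeholder resolution (other identifiers as in the hunks above):

t = SampleProcessor.Types
output_sample_types = [
    # warped input for the autoencoder
    {'types': (t.IMG_WARPED_TRANSFORMED, face_type, t.MODE_BGR),            'data_format': nn.data_format, 'resolution': 96},
    # unwarped target for the reconstruction loss
    {'types': (t.IMG_TRANSFORMED,        face_type, t.MODE_BGR),            'data_format': nn.data_format, 'resolution': 96},
    # convex-hull face mask aligned to the target
    {'types': (t.IMG_TRANSFORMED,        face_type, t.MODE_FACE_MASK_HULL), 'data_format': nn.data_format, 'resolution': 96},
]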
samplelib/SampleProcessor.py

@@ -7,41 +7,6 @@ import numpy as np
 from core import imagelib
 from facelib import FaceType, LandmarksProcessor
 
-"""
-output_sample_types = [
-                        {} opts,
-                        ...
-                      ]
-
-opts:
-    'types' : (S,S,...,S)
-        where S:
-            'IMG_SOURCE'
-            'IMG_WARPED'
-            'IMG_WARPED_TRANSFORMED'
-            'IMG_TRANSFORMED'
-            'IMG_LANDMARKS_ARRAY' #currently unused
-            'IMG_PITCH_YAW_ROLL'
-
-            'FACE_TYPE_HALF'
-            'FACE_TYPE_FULL'
-            'FACE_TYPE_HEAD' #currently unused
-            'FACE_TYPE_AVATAR' #currently unused
-
-            'MODE_BGR' #BGR
-            'MODE_G' #Grayscale
-            'MODE_GGG' #3xGrayscale
-            'MODE_M' #mask only
-            'MODE_BGR_SHUFFLE' #BGR shuffle
-
-    'resolution' : N
-    'motion_blur' : (chance_int, range) - chance 0..100 to apply to face (not mask), and max_size of motion blur
-    'ct_mode' :
-    'normalize_tanh' : bool
-
-"""
-
 class SampleProcessor(object):
     class Types(IntEnum):
         NONE = 0
@@ -70,9 +35,10 @@ class SampleProcessor(object):
         MODE_BGR = 40 #BGR
         MODE_G = 41 #Grayscale
         MODE_GGG = 42 #3xGrayscale
-        MODE_M = 43 #mask only
-        MODE_BGR_SHUFFLE = 44 #BGR shuffle
-        MODE_BGR_RANDOM_HSV_SHIFT = 45
+        MODE_FACE_MASK_HULL = 43 #mask hull as grayscale
+        MODE_FACE_MASK_STRUCT = 44 #mask structure as grayscale
+        MODE_BGR_SHUFFLE = 45 #BGR shuffle
+        MODE_BGR_RANDOM_HSV_SHIFT = 46
         MODE_END = 50
 
     class Options(object):
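Note that MODE_M does not merely gain a sibling: its slot 43 is taken over by MODE_FACE_MASK_HULL, and the two modes after it shift up by one. A consolidated view of the resulting block (a sketch; members before MODE_BGR are unchanged and omitted here):

from enum import IntEnum

class Types(IntEnum):
    MODE_BGR                  = 40  # BGR
    MODE_G                    = 41  # grayscale
    MODE_GGG                  = 42  # 3x grayscale
    MODE_FACE_MASK_HULL       = 43  # convex-hull mask as grayscale (replaces MODE_M)
    MODE_FACE_MASK_STRUCT     = 44  # structure mask as grayscale (new)
    MODE_BGR_SHUFFLE          = 45  # BGR with shuffled channels (was 44)
    MODE_BGR_RANDOM_HSV_SHIFT = 46  # (was 45)
    MODE_END                  = 50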
@@ -135,9 +101,11 @@ class SampleProcessor(object):
                 elif t >= SPTF.MODE_BEGIN and t < SPTF.MODE_END:
                     mode_type = t
 
-            if mode_type == SPTF.MODE_M and not is_face_sample:
-                raise ValueError("MODE_M applicable only for face samples")
+            if mode_type == SPTF.MODE_FACE_MASK_HULL and not is_face_sample:
+                raise ValueError("MODE_FACE_MASK_HULL applicable only for face samples")
+            if mode_type == SPTF.MODE_FACE_MASK_STRUCT and not is_face_sample:
+                raise ValueError("MODE_FACE_MASK_STRUCT applicable only for face samples")
 
             can_warp = (img_type==SPTF.IMG_WARPED or img_type==SPTF.IMG_WARPED_TRANSFORMED)
             can_transform = (img_type==SPTF.IMG_WARPED_TRANSFORMED or img_type==SPTF.IMG_TRANSFORMED)
@@ -164,11 +132,8 @@ class SampleProcessor(object):
             else:
                 if mode_type == SPTF.NONE:
                     raise ValueError ('expected MODE_ type')
 
-                need_img = mode_type != SPTF.MODE_M
-                need_mask = mode_type == SPTF.MODE_M
-
-                if need_mask:
+                if mode_type == SPTF.MODE_FACE_MASK_HULL:
                     if sample.eyebrows_expand_mod is not None:
                         mask = LandmarksProcessor.get_image_hull_mask (sample_bgr.shape, sample.landmarks, eyebrows_expand_mod=sample.eyebrows_expand_mod )
                     else:
@@ -176,8 +141,12 @@ class SampleProcessor(object):
 
                     if sample.ie_polys is not None:
                         sample.ie_polys.overlay_mask(mask)
-
-                if need_img:
+                elif mode_type == SPTF.MODE_FACE_MASK_STRUCT:
+                    if sample.eyebrows_expand_mod is not None:
+                        mask = LandmarksProcessor.get_face_struct_mask (sample_bgr.shape, sample.landmarks, eyebrows_expand_mod=sample.eyebrows_expand_mod )
+                    else:
+                        mask = LandmarksProcessor.get_face_struct_mask (sample_bgr.shape, sample.landmarks)
+                else:
                     img = sample_bgr
                     if motion_blur is not None:
                         chance, mb_max_size = motion_blur
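The two mask branches differ only in what LandmarksProcessor rasterizes: the hull mode fills the convex hull spanned by the landmarks, while the struct mode draws just the landmark polylines (draw_landmarks with draw_circles=False, as added in the first hunk). Side by side, assuming an aligned sample_bgr and its landmarks are in scope:

# Filled region: 1.0 inside the convex hull of the face landmarks, 0.0 outside.
hull_mask = LandmarksProcessor.get_image_hull_mask(sample_bgr.shape, landmarks)

# Line drawing: 1.0 only along the 2px eyebrow/eye/nose/mouth/jaw polylines.
struct_mask = LandmarksProcessor.get_face_struct_mask(sample_bgr.shape, landmarks)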
@@ -201,37 +170,31 @@ class SampleProcessor(object):
                 if sample.face_type == FaceType.MARK_ONLY:
                     mat = LandmarksProcessor.get_transform_mat (sample.landmarks, sample.shape[0], target_ft)
 
-                    if need_img:
+                    if mode_type == SPTF.MODE_FACE_MASK_HULL or mode_type == SPTF.MODE_FACE_MASK_STRUCT:
+                        mask = cv2.warpAffine( mask, mat, (sample.shape[0],sample.shape[0]), flags=cv2.INTER_CUBIC )
+                        mask = imagelib.warp_by_params (params, mask, can_warp, can_transform, can_flip=True, border_replicate=False)
+                        mask = cv2.resize( mask, (resolution,resolution), cv2.INTER_CUBIC )[...,None]
+                    else:
                         img = cv2.warpAffine( img, mat, (sample.shape[0],sample.shape[0]), flags=cv2.INTER_CUBIC )
                         img = imagelib.warp_by_params (params, img, can_warp, can_transform, can_flip=True, border_replicate=True)
                         img = cv2.resize( img, (resolution,resolution), cv2.INTER_CUBIC )
-
-                    if need_mask:
-                        mask = cv2.warpAffine( mask, mat, (sample.shape[0],sample.shape[0]), flags=cv2.INTER_CUBIC )
-                        mask = imagelib.warp_by_params (params, mask, can_warp, can_transform, can_flip=True, border_replicate=False)
-                        mask = cv2.resize( mask, (resolution,resolution), cv2.INTER_CUBIC )[...,None]
                 else:
                     mat = LandmarksProcessor.get_transform_mat (sample.landmarks, resolution, target_ft)
 
-                    if need_img:
+                    if mode_type == SPTF.MODE_FACE_MASK_HULL or mode_type == SPTF.MODE_FACE_MASK_STRUCT:
+                        mask = imagelib.warp_by_params (params, mask, can_warp, can_transform, can_flip=True, border_replicate=False)
+                        mask = cv2.warpAffine( mask, mat, (resolution,resolution), borderMode=cv2.BORDER_CONSTANT, flags=cv2.INTER_CUBIC )[...,None]
+                    else:
                         img = imagelib.warp_by_params (params, img, can_warp, can_transform, can_flip=True, border_replicate=True)
                         img = cv2.warpAffine( img, mat, (resolution,resolution), borderMode=cv2.BORDER_REPLICATE, flags=cv2.INTER_CUBIC )
-
-                    if need_mask:
-                        mask = imagelib.warp_by_params (params, mask, can_warp, can_transform, can_flip=True, border_replicate=False)
-                        mask = cv2.warpAffine( mask, mat, (resolution,resolution), borderMode=cv2.BORDER_CONSTANT, flags=cv2.INTER_CUBIC )[...,None]
 
             else:
-                if need_img:
-                    img = imagelib.warp_by_params (params, img, can_warp, can_transform, can_flip=True, border_replicate=True)
-                    img = cv2.resize( img, (resolution,resolution), cv2.INTER_CUBIC )
-
-                if need_mask:
-                    mask = imagelib.warp_by_params (params, mask, can_warp, can_transform, can_flip=True, border_replicate=False)
-                    mask = cv2.resize( mask, (resolution,resolution), cv2.INTER_CUBIC )[...,None]
+                img = imagelib.warp_by_params (params, img, can_warp, can_transform, can_flip=True, border_replicate=True)
+                img = cv2.resize( img, (resolution,resolution), cv2.INTER_CUBIC )
 
-            if mode_type == SPTF.MODE_M:
+            if mode_type == SPTF.MODE_FACE_MASK_HULL or mode_type == SPTF.MODE_FACE_MASK_STRUCT:
                 out_sample = np.clip(mask, 0, 1).astype(np.float32)
             else:
                 img = np.clip(img, 0, 1).astype(np.float32)
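Whichever mask mode is requested, the emitted sample is the warped mask clipped to [0, 1] as float32 with a trailing channel axis (the [...,None] above), so models consume it exactly as they consumed the old MODE_M output. A hypothetical consuming snippet, with invented variable names, for the three sample types requested by the trainers:

# The generator yields one array per entry in output_sample_types.
warped_src, target_src, target_srcm = generator.generate_next()
assert target_srcm.dtype == np.float32
assert 0.0 <= target_srcm.min() and target_srcm.max() <= 1.0  # np.clip(mask, 0, 1)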