Mirror of https://github.com/iperov/DeepFaceLab.git, synced 2024-03-22 13:10:55 +08:00
AMP, SAEHD: reverted GAN to december version.
This commit is contained in:
parent b4b72d056f
commit ee1bc83a14
@@ -146,11 +146,7 @@ class UNetPatchDiscriminator(nn.ModelBase):
         prev_ch = in_ch
         self.convs = []
-        self.res1 = []
-        self.res2 = []
         self.upconvs = []
-        self.upres1 = []
-        self.upres2 = []
         layers = self.find_archi(patch_size)
 
         level_chs = { i-1:v for i,v in enumerate([ min( base_ch * (2**i), 512 ) for i in range(len(layers)+1)]) }
 
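For orientation, the level_chs comprehension (unchanged by this commit) maps each encoder level, keyed from -1 for the input stem, to a channel count that doubles per level and saturates at 512. A quick worked example, assuming base_ch=64 and a four-layer architecture from find_archi (both values hypothetical; the real output depends on patch_size):

    # Hypothetical inputs for illustration only.
    base_ch, n_layers = 64, 4
    level_chs = { i-1 : min(base_ch * (2**i), 512) for i in range(n_layers + 1) }
    print(level_chs)   # {-1: 64, 0: 128, 1: 256, 2: 512, 3: 512}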
@@ -160,14 +156,8 @@ class UNetPatchDiscriminator(nn.ModelBase):
         for i, (kernel_size, strides) in enumerate(layers):
             self.convs.append ( nn.Conv2D( level_chs[i-1], level_chs[i], kernel_size=kernel_size, strides=strides, padding='SAME') )
-
-            self.res1.append ( ResidualBlock(level_chs[i]) )
-            self.res2.append ( ResidualBlock(level_chs[i]) )
 
             self.upconvs.insert (0, nn.Conv2DTranspose( level_chs[i]*(2 if i != len(layers)-1 else 1), level_chs[i-1], kernel_size=kernel_size, strides=strides, padding='SAME') )
-
-            self.upres1.insert (0, ResidualBlock(level_chs[i-1]*2) )
-            self.upres2.insert (0, ResidualBlock(level_chs[i-1]*2) )
 
         self.out_conv = nn.Conv2D( level_chs[-1]*2, 1, kernel_size=1, padding='VALID')
 
         self.center_out = nn.Conv2D( level_chs[len(layers)-1], 1, kernel_size=1, padding='VALID')
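One detail worth noting in the kept lines: each nn.Conv2DTranspose is built with input width level_chs[i]*2 except at the deepest level. That matches the forward pass below, where every decoder step except the first consumes a tf.concat of a skip tensor and an upsampled tensor of equal width. A small sketch of the resulting widths, continuing the hypothetical example above:

    # Decoder runs from the deepest level back toward the stem.
    for i in reversed(range(n_layers)):
        in_width = level_chs[i] * (2 if i != n_layers - 1 else 1)
        print(f"upconv level {i}: {in_width} -> {level_chs[i-1]} channels")
    # level 3: 512 -> 512, level 2: 1024 -> 256, level 1: 512 -> 128, level 0: 256 -> 64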
@@ -178,19 +168,15 @@ class UNetPatchDiscriminator(nn.ModelBase):
         x = tf.nn.leaky_relu( self.in_conv(x), 0.2 )
 
         encs = []
-        for conv, res1, res2 in zip(self.convs, self.res1, self.res2):
+        for conv in self.convs:
             encs.insert(0, x)
             x = tf.nn.leaky_relu( conv(x), 0.2 )
-            x = res1(x)
-            x = res2(x)
 
         center_out, x = self.center_out(x), tf.nn.leaky_relu( self.center_conv(x), 0.2 )
 
-        for i, (upconv, enc, upres1, upres2 ) in enumerate(zip(self.upconvs, encs, self.upres1, self.upres2)):
+        for i, (upconv, enc) in enumerate(zip(self.upconvs, encs)):
             x = tf.nn.leaky_relu( upconv(x), 0.2 )
             x = tf.concat( [enc, x], axis=nn.conv2d_ch_axis)
-            x = upres1(x)
-            x = upres2(x)
 
         return center_out, self.out_conv(x)
 
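Even after the revert, forward still returns two heads: center_out, a coarse PatchGAN logit map taken at the bottleneck, and out_conv(x), a dense logit map at input resolution produced after the UNet decoder. A minimal usage sketch, assuming DeepFaceLab's nn namespace has been initialized and that the constructor takes patch_size/in_ch/base_ch as the trainers appear to pass them (all values hypothetical):

    # Hypothetical driver code, not part of this commit.
    D = nn.UNetPatchDiscriminator(patch_size=32, in_ch=3, base_ch=16, name="D_src")
    center_logits, pixel_logits = D.forward(batch_nchw)   # coarse map, dense map
    # Both maps feed real/fake sigmoid cross-entropy losses in the trainer.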
@@ -123,7 +123,7 @@ class AMPModel(ModelBase):
             gan_patch_size = np.clip ( io.input_int("GAN patch size", default_gan_patch_size, add_info="3-640", help_message="The higher patch size, the higher the quality, the more VRAM is required. You can get sharper edges even at the lowest setting. Typical fine value is resolution / 8." ), 3, 640 )
             self.options['gan_patch_size'] = gan_patch_size
 
-            gan_dims = np.clip ( io.input_int("GAN dimensions", default_gan_dims, add_info="4-64", help_message="The dimensions of the GAN network. The higher dimensions, the more VRAM is required. You can get sharper edges even at the lowest setting. Typical fine value is 16." ), 4, 64 )
+            gan_dims = np.clip ( io.input_int("GAN dimensions", default_gan_dims, add_info="4-512", help_message="The dimensions of the GAN network. The higher dimensions, the more VRAM is required. You can get sharper edges even at the lowest setting. Typical fine value is 16." ), 4, 512 )
             self.options['gan_dims'] = gan_dims
 
             self.options['ct_mode'] = io.input_str (f"Color transfer for src faceset", default_ct_mode, ['none','rct','lct','mkl','idt','sot'], help_message="Change color distribution of src samples close to dst samples. Try all modes to find the best.")
@@ -156,7 +156,7 @@ Examples: df, liae, df-d, df-ud, liae-ud, ...
             gan_patch_size = np.clip ( io.input_int("GAN patch size", default_gan_patch_size, add_info="3-640", help_message="The higher patch size, the higher the quality, the more VRAM is required. You can get sharper edges even at the lowest setting. Typical fine value is resolution / 8." ), 3, 640 )
             self.options['gan_patch_size'] = gan_patch_size
 
-            gan_dims = np.clip ( io.input_int("GAN dimensions", default_gan_dims, add_info="4-64", help_message="The dimensions of the GAN network. The higher dimensions, the more VRAM is required. You can get sharper edges even at the lowest setting. Typical fine value is 16." ), 4, 64 )
+            gan_dims = np.clip ( io.input_int("GAN dimensions", default_gan_dims, add_info="4-512", help_message="The dimensions of the GAN network. The higher dimensions, the more VRAM is required. You can get sharper edges even at the lowest setting. Typical fine value is 16." ), 4, 512 )
             self.options['gan_dims'] = gan_dims
 
         if 'df' in self.options['archi']:
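The widened clip range is the substantive change in both model files: the previous build capped gan_dims at 64, and the revert restores the December cap of 512; the "typical fine value" guidance in the help text is unchanged. A small check of what the prompts accept, assuming a hypothetical 256 px model:

    import numpy as np

    resolution = 256                                        # hypothetical
    gan_patch_size = int(np.clip(resolution // 8, 3, 640))  # 32, the "typical fine value"
    gan_dims       = int(np.clip(16, 4, 512))               # 16 passes; the cap is 512 again, not 64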
@@ -467,7 +467,7 @@ Examples: df, liae, df-d, df-ud, liae-ud, ...
 
                     gpu_G_loss += self.options['true_face_power']*DLoss(gpu_src_code_d_ones, gpu_src_code_d)
 
-                    gpu_D_code_loss = (DLoss(gpu_src_code_d_ones , gpu_dst_code_d) + \
+                    gpu_D_code_loss = (DLoss(gpu_dst_code_d_ones , gpu_dst_code_d) + \
                                        DLoss(gpu_src_code_d_zeros, gpu_src_code_d) ) * 0.5
 
                     gpu_D_code_loss_gvs += [ nn.gradients (gpu_D_code_loss, self.code_discriminator.get_weights() ) ]