Export AMP/SAEHD: added "Export quantized" option

iperov 2021-07-30 17:13:46 +04:00
parent f5cc54177f
commit 83b1412da7
2 changed files with 8 additions and 2 deletions


@@ -125,7 +125,10 @@ class AMPModel(ModelBase):
         if ct_mode == 'none':
             ct_mode = None
 
-        use_fp16 = self.is_exporting
+        use_fp16 = False
+        if self.is_exporting:
+            use_fp16 = io.input_bool ("Export quantized?", False, help_message='Makes the exported model faster. If you have problems, disable this option.')
+
         conv_dtype = tf.float16 if use_fp16 else tf.float32
 
         class Downscale(nn.ModelBase):


@@ -219,7 +219,10 @@ Examples: df, liae, df-d, df-ud, liae-ud, ...
             self.set_iter(0)
 
         adabelief = self.options['adabelief']
 
-        use_fp16 = False#self.options['use_fp16']
+        use_fp16 = False
+        if self.is_exporting:
+            use_fp16 = io.input_bool ("Export quantized?", False, help_message='Makes the exported model faster. If you have problems, disable this option.')
+
         self.gan_power = gan_power = 0.0 if self.pretrain else self.options['gan_power']
         random_warp = False if self.pretrain else self.options['random_warp']
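
For context, a minimal standalone sketch of the behaviour both hunks add: at export time the user is asked whether to quantize, and the answer decides whether convolutions are built in float16 or float32. This is not DeepFaceLab's code; prompt_export_quantized is a hypothetical stand-in for io.input_bool, and numpy dtypes stand in for tf.float16/tf.float32.

# Sketch of the "Export quantized?" flow added to AMP and SAEHD.
# prompt_export_quantized and the numpy dtypes are illustrative assumptions.
import numpy as np

def prompt_export_quantized(default: bool = False) -> bool:
    # Ask once at export time; an empty answer keeps the default.
    answer = input("Export quantized? [y/N] ").strip().lower()
    return answer.startswith("y") if answer else default

def pick_conv_dtype(is_exporting: bool):
    # Training always uses float32; only the export path may opt into fp16,
    # which is what makes the exported model faster.
    use_fp16 = False
    if is_exporting:
        use_fp16 = prompt_export_quantized(default=False)
    return np.float16 if use_fp16 else np.float32

if __name__ == "__main__":
    print("conv dtype:", pick_conv_dtype(is_exporting=True))

Declining the prompt keeps float32, which matches the help_message in the diff: disable the option if the quantized export causes problems.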