Bugfix - TF Version check
This commit is contained in:
parent ac6bda7f00
commit 84ae6006bb
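This commit replaces float-based Tensorflow version values (e.g. `2.7`) with `(major, minor)` integer tuples throughout the version checks, presumably so the checks stay correct for releases whose minor version has two digits (e.g. 2.10). A minimal illustrative sketch, not part of the commit, of why float parsing of a `major.minor` string breaks down:

```python
# Illustrative only: float parsing vs tuple parsing of a version string.
version = "2.10.0"  # hypothetical Tensorflow version string

# Float parsing collapses "2.10" to 2.1, so 2.10 looks *older* than 2.7.
as_float = float(".".join(version.split(".")[:2]))
print(as_float)            # 2.1
print(as_float < 2.7)      # True  -- wrongly treated as below the minimum

# Tuple parsing keeps major and minor separate and compares element-wise.
as_tuple = tuple(int(part) for part in version.split(".")[:2])
print(as_tuple)            # (2, 10)
print(as_tuple < (2, 7))   # False -- correctly treated as newer than 2.7
```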
@@ -87,9 +87,9 @@ class ScriptExecutor():  # pylint:disable=too-few-public-methods
         FaceswapError
             If Tensorflow is not found, or is not between versions 2.4 and 2.9
         """
-        amd_ver = 2.2
-        min_ver = 2.7
-        max_ver = 2.9
+        amd_ver = (2, 2)
+        min_ver = (2, 7)
+        max_ver = (2, 9)
         try:
             import tensorflow as tf  # noqa pylint:disable=import-outside-toplevel,unused-import
         except ImportError as err:
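The bounds check that consumes these constants sits outside the hunk. A hedged, self-contained sketch of the general pattern, using a hypothetical `check_tf_version` helper and a plain `RuntimeError` rather than the project's `FaceswapError`, so it is not the actual `ScriptExecutor` logic:

```python
from typing import Tuple

def check_tf_version(version_string: str,
                     min_ver: Tuple[int, int] = (2, 7),
                     max_ver: Tuple[int, int] = (2, 9)) -> None:
    """Raise if the given Tensorflow version is outside the supported range."""
    major, minor = (int(part) for part in version_string.split(".")[:2])
    tf_version = (major, minor)
    if not min_ver <= tf_version <= max_ver:
        raise RuntimeError(
            f"Tensorflow {version_string} is unsupported; expected a version "
            f"between {min_ver} and {max_ver} (inclusive).")

check_tf_version("2.8.3")      # passes: (2, 8) is inside the range
# check_tf_version("2.10.0")   # would raise: (2, 10) > (2, 9)
```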
@@ -15,7 +15,7 @@ from multiprocessing import current_process
 from socket import timeout as socket_timeout, error as socket_error
 from threading import get_ident
 from time import time
-from typing import cast, Dict, List, Optional, Union, TYPE_CHECKING
+from typing import cast, Dict, List, Optional, Union, Tuple, TYPE_CHECKING
 
 import numpy as np
 from tqdm import tqdm
@@ -34,7 +34,7 @@ _image_extensions = [  # pylint:disable=invalid-name
 _video_extensions = [  # pylint:disable=invalid-name
     ".avi", ".flv", ".mkv", ".mov", ".mp4", ".mpeg", ".mpg", ".webm", ".wmv",
     ".ts", ".vob"]
-_TF_VERS: Optional[float] = None
+_TF_VERS: Optional[Tuple[int, int]] = None
 ValidBackends = Literal["amd", "nvidia", "cpu", "apple_silicon"]
 
 
@@ -149,7 +149,7 @@ def set_backend(backend: str) -> None:
     _FS_BACKEND = backend
 
 
-def get_tf_version() -> float:
+def get_tf_version() -> Tuple[int, int]:
     """ Obtain the major.minor version of currently installed Tensorflow.
 
     Returns
@@ -160,7 +160,8 @@ def get_tf_version() -> float:
     global _TF_VERS  # pylint:disable=global-statement
     if _TF_VERS is None:
         import tensorflow as tf  # pylint:disable=import-outside-toplevel
-        _TF_VERS = float(".".join(tf.__version__.split(".")[:2]))  # pylint:disable=no-member
+        split = tf.__version__.split(".")[:2]
+        _TF_VERS = (int(split[0]), int(split[1]))
     return _TF_VERS
 
 
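For reference, a self-contained sketch of the updated parse-and-cache behaviour. It mirrors the hunk above, except the `version_string` parameter stands in for `tf.__version__` (an assumption made purely so the snippet runs without Tensorflow installed):

```python
from typing import Optional, Tuple

_TF_VERS: Optional[Tuple[int, int]] = None

def get_tf_version(version_string: str = "2.9.1") -> Tuple[int, int]:
    """Parse and cache the (major, minor) Tensorflow version as integers."""
    global _TF_VERS
    if _TF_VERS is None:
        split = version_string.split(".")[:2]
        _TF_VERS = (int(split[0]), int(split[1]))
    return _TF_VERS

print(get_tf_version())           # (2, 9)
print(get_tf_version() > (2, 7))  # True -- tuple comparison, element-wise
```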
@@ -80,7 +80,7 @@ class _EncoderInfo:
     keras_name: str
     default_size: int
     no_amd: bool = False
-    tf_min: float = 2.0
+    tf_min: Tuple[int, int] = (2, 0)
     scaling: Tuple[int, int] = (0, 1)
     min_size: int = 32
     enforce_for_weights: bool = False
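The `tf_min` field is presumably compared against `get_tf_version()` to decide whether an encoder can be offered on the running install. A hedged sketch of that gating, using a trimmed-down stand-in dataclass and a hypothetical `encoder_available` helper rather than the real plugin logic:

```python
from dataclasses import dataclass
from typing import Tuple

@dataclass
class _EncoderInfo:
    """Trimmed-down stand-in for the dataclass in the hunk above."""
    keras_name: str
    default_size: int
    no_amd: bool = False
    tf_min: Tuple[int, int] = (2, 0)

def encoder_available(info: _EncoderInfo, tf_version: Tuple[int, int]) -> bool:
    # Assumption: an encoder is usable when the running Tensorflow version
    # is at least info.tf_min; the real selection logic may differ.
    return tf_version >= info.tf_min

effnet_v2 = _EncoderInfo(keras_name="EfficientNetV2S", default_size=384,
                         no_amd=True, tf_min=(2, 8))
print(encoder_available(effnet_v2, (2, 9)))  # True
print(encoder_available(effnet_v2, (2, 7)))  # False
```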
@@ -95,35 +95,50 @@ _MODEL_MAPPING: Dict[str, _EncoderInfo] = dict(
     densenet201=_EncoderInfo(
         keras_name="DenseNet201", default_size=224),
     efficientnet_b0=_EncoderInfo(
-        keras_name="EfficientNetB0", no_amd=True, tf_min=2.3, scaling=(0, 255), default_size=224),
+        keras_name="EfficientNetB0",
+        no_amd=True, tf_min=(2, 3), scaling=(0, 255), default_size=224),
     efficientnet_b1=_EncoderInfo(
-        keras_name="EfficientNetB1", no_amd=True, tf_min=2.3, scaling=(0, 255), default_size=240),
+        keras_name="EfficientNetB1",
+        no_amd=True, tf_min=(2, 3), scaling=(0, 255), default_size=240),
     efficientnet_b2=_EncoderInfo(
-        keras_name="EfficientNetB2", no_amd=True, tf_min=2.3, scaling=(0, 255), default_size=260),
+        keras_name="EfficientNetB2",
+        no_amd=True, tf_min=(2, 3), scaling=(0, 255), default_size=260),
     efficientnet_b3=_EncoderInfo(
-        keras_name="EfficientNetB3", no_amd=True, tf_min=2.3, scaling=(0, 255), default_size=300),
+        keras_name="EfficientNetB3",
+        no_amd=True, tf_min=(2, 3), scaling=(0, 255), default_size=300),
     efficientnet_b4=_EncoderInfo(
-        keras_name="EfficientNetB4", no_amd=True, tf_min=2.3, scaling=(0, 255), default_size=380),
+        keras_name="EfficientNetB4",
+        no_amd=True, tf_min=(2, 3), scaling=(0, 255), default_size=380),
     efficientnet_b5=_EncoderInfo(
-        keras_name="EfficientNetB5", no_amd=True, tf_min=2.3, scaling=(0, 255), default_size=456),
+        keras_name="EfficientNetB5",
+        no_amd=True, tf_min=(2, 3), scaling=(0, 255), default_size=456),
     efficientnet_b6=_EncoderInfo(
-        keras_name="EfficientNetB6", no_amd=True, tf_min=2.3, scaling=(0, 255), default_size=528),
+        keras_name="EfficientNetB6",
+        no_amd=True, tf_min=(2, 3), scaling=(0, 255), default_size=528),
     efficientnet_b7=_EncoderInfo(
-        keras_name="EfficientNetB7", no_amd=True, tf_min=2.3, scaling=(0, 255), default_size=600),
+        keras_name="EfficientNetB7",
+        no_amd=True, tf_min=(2, 3), scaling=(0, 255), default_size=600),
     efficientnet_v2_b0=_EncoderInfo(
-        keras_name="EfficientNetV2B0", no_amd=True, tf_min=2.8, scaling=(-1, 1), default_size=224),
+        keras_name="EfficientNetV2B0",
+        no_amd=True, tf_min=(2, 8), scaling=(-1, 1), default_size=224),
     efficientnet_v2_b1=_EncoderInfo(
-        keras_name="EfficientNetV2B1", no_amd=True, tf_min=2.8, scaling=(-1, 1), default_size=240),
+        keras_name="EfficientNetV2B1",
+        no_amd=True, tf_min=(2, 8), scaling=(-1, 1), default_size=240),
     efficientnet_v2_b2=_EncoderInfo(
-        keras_name="EfficientNetV2B2", no_amd=True, tf_min=2.8, scaling=(-1, 1), default_size=260),
+        keras_name="EfficientNetV2B2",
+        no_amd=True, tf_min=(2, 8), scaling=(-1, 1), default_size=260),
     efficientnet_v2_b3=_EncoderInfo(
-        keras_name="EfficientNetV2B3", no_amd=True, tf_min=2.8, scaling=(-1, 1), default_size=300),
+        keras_name="EfficientNetV2B3",
+        no_amd=True, tf_min=(2, 8), scaling=(-1, 1), default_size=300),
     efficientnet_v2_s=_EncoderInfo(
-        keras_name="EfficientNetV2S", no_amd=True, tf_min=2.8, scaling=(-1, 1), default_size=384),
+        keras_name="EfficientNetV2S",
+        no_amd=True, tf_min=(2, 8), scaling=(-1, 1), default_size=384),
     efficientnet_v2_m=_EncoderInfo(
-        keras_name="EfficientNetV2M", no_amd=True, tf_min=2.8, scaling=(-1, 1), default_size=480),
+        keras_name="EfficientNetV2M",
+        no_amd=True, tf_min=(2, 8), scaling=(-1, 1), default_size=480),
     efficientnet_v2_l=_EncoderInfo(
-        keras_name="EfficientNetV2L", no_amd=True, tf_min=2.8, scaling=(-1, 1), default_size=480),
+        keras_name="EfficientNetV2L",
+        no_amd=True, tf_min=(2, 8), scaling=(-1, 1), default_size=480),
     inception_resnet_v2=_EncoderInfo(
         keras_name="InceptionResNetV2", scaling=(-1, 1), min_size=75, default_size=299),
     inception_v3=_EncoderInfo(

@@ -133,9 +148,11 @@ _MODEL_MAPPING: Dict[str, _EncoderInfo] = dict(
     mobilenet_v2=_EncoderInfo(
         keras_name="MobileNetV2", scaling=(-1, 1), default_size=224),
     mobilenet_v3_large=_EncoderInfo(
-        keras_name="MobileNetV3Large", no_amd=True, tf_min=2.4, scaling=(-1, 1), default_size=224),
+        keras_name="MobileNetV3Large",
+        no_amd=True, tf_min=(2, 4), scaling=(-1, 1), default_size=224),
     mobilenet_v3_small=_EncoderInfo(
-        keras_name="MobileNetV3Small", no_amd=True, tf_min=2.4, scaling=(-1, 1), default_size=224),
+        keras_name="MobileNetV3Small",
+        no_amd=True, tf_min=(2, 4), scaling=(-1, 1), default_size=224),
     nasnet_large=_EncoderInfo(
         keras_name="NASNetLarge", scaling=(-1, 1), default_size=331, enforce_for_weights=True),
     nasnet_mobile=_EncoderInfo(
@@ -259,7 +259,7 @@ class TrainerBase():
             logs = {log[0]: log[1]
                     for log in zip(self._model.state.loss_names, loss)}
 
-            if get_tf_version() > 2.7:
+            if get_tf_version() > (2, 7):
                 # Bug in TF 2.8/2.9 where batch recording got deleted.
                 # ref: https://github.com/keras-team/keras/issues/16173
                 with tf.summary.record_if(True), self._tensorboard._train_writer.as_default():  # noqa pylint:disable=protected-access,not-context-manager
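A side effect worth noting, shown as a tiny illustrative check: with float parsing, a hypothetical TF 2.10 would read as 2.1 and this `> 2.7` guard would silently stop applying the Keras workaround, whereas tuple comparison keeps it active.

```python
print((2, 10) > (2, 7))                                 # True: workaround applied
print(float(".".join("2.10.0".split(".")[:2])) > 2.7)   # False: 2.1 > 2.7 fails
```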