Skip to content

Main class to modify networks


Main module for modifying networks

NetworkModifier

Class for transforming a deep CNN's inputs and outputs. The network should be trained with Tensorflow

Parameters:

Name Type Description Default
config VersionModifyConfig

configurations to load and modify the network

required
Source code in conftrainer/modifications/modifier.py
class NetworkModifier:
    """
    Class for transforming a deep CNN's inputs and outputs. The network should be trained with
    Tensorflow

    Parameters
    ----------
    config : VersionModifyConfig
        configurations to load and modify the network
    """

    def __init__(self, config: VersionModifyConfig):
        self.config = config
        # compile=False: the model is only re-exported for serving, so the training
        # configuration (optimizer, loss, metrics) is not needed
        self.model = tf.keras.models.load_model(config.load_path, compile=False)
        self.signature = self.model.signatures["serving_default"]
        # Optional pre-/post-processing tf.functions, resolved by name (None if unset)
        self.input_func = self._maybe_import_modifier(name=config.input_func)
        self.output_func = self._maybe_import_modifier(name=config.output_func)

        self.input_shape = self._parse_input_shape(input_shape=config.input_shape,
                                                   model=self.model)
        self.rescaling_args = config.rescaling_args

    @property
    def input_placeholder(self):
        """A default placeholder for signature inputs in case there's no input function specified"""
        if self.input_func is None:
            # No input function: expect a 4D float batch of unconstrained size
            return tf.TensorSpec(shape=[None, None, None, None], dtype=tf.float32, name='inputs')
        dtype = self.input_func.input_signature[0].dtype
        if dtype is tf.string:
            # String inputs (e.g. serialized/encoded payloads) arrive as a flat batch
            return tf.TensorSpec(shape=[None], dtype=dtype, name='inputs')
        raise ValueError(f"Unknown input dtype: {dtype}")

    @staticmethod
    def _maybe_import_modifier(name: str) -> Optional[tf.function]:
        """Parse and import preprocessing function using its name

        Returns None when no name is configured (empty string or None).
        """
        if not name:
            return None  # explicit: no modifier configured
        return getattr(tf_functions, name)

    @staticmethod
    def _parse_input_shape(input_shape: Optional[Iterable[int]], model: tf.keras.models.Model) -> Iterable[int]:
        """Parse the input shape from the network graph, and make sure it has at least 3 dimensions

        Parameters
        ----------
        input_shape : Optional[Iterable[int]]
            explicit input shape (without batch dimension); if None it is read from the model
        model : tf.keras.models.Model
            model whose input shape is used as a fallback

        Raises
        ------
        ValueError
            if neither a usable model input shape nor a valid explicit shape is available
        """
        if input_shape is None:
            try:
                input_shape = model.input.get_shape()[1:]  # Remove batch dimension
            except AttributeError as exc:
                raise ValueError("Either provide a keras model with defined input shape or specify the input shape "
                                 "manually") from exc

        # Materialize into a tuple so that a one-shot iterable (e.g. a generator) is
        # not exhausted by the validation below, and so later slicing (input_shape[:2]
        # in preprocess) is guaranteed to work
        input_shape = tuple(input_shape)
        if (not all(isinstance(item, int) for item in input_shape)) or (len(input_shape) < 3):
            raise ValueError(f"Provide a valid input shape: {input_shape = }")

        return input_shape

    @tf.function
    def preprocess(self, inp: tf.Tensor) -> tf.Tensor:
        """
        Apply resizing, preprocessing and input function on given inputs

        Parameters
        ----------
        inp : tf.Tensor
            A single input. Must be of input type of self.input_func if any, otherwise must
            be an image type

        Returns
        -------
        out : tf.Tensor
            resized and preprocessed tensor
        """
        # Optional user-supplied conversion (e.g. decoding) before any image ops
        if self.input_func is not None:
            inp = self.input_func(inp)
        # Only rescale when non-default rescaling arguments were configured
        if self.rescaling_args != RescaleArgs():
            inp = rescale(inp, **self.rescaling_args.dict())
        # Resize to the spatial (height, width) part of the parsed input shape
        inp = tf.image.resize(images=inp, size=self.input_shape[:2])
        return inp

    @tf.function
    def predict_batch(self, batch: tf.Tensor) -> tf.Tensor:
        """
        Wrap new prediction function to accept only one positional argument

        Parameters
        ----------
        batch : tf.Tensor
            a batch of inputs

        Returns
        -------
        out : tf.Tensor
            predictions on received inputs
        """
        # Preprocess each element of the batch individually; fn_output_signature pins
        # the element dtype to the dtype the underlying keras model expects
        batch = tf.map_fn(fn=self.preprocess,
                          elems=batch,
                          fn_output_signature=self.model.input.dtype)

        predictions = self.signature(batch)
        # Consistent explicit None-check (matches the input_func check in preprocess)
        if self.output_func is not None:
            # Post-process each prediction; stop_gradient is mapped over the structure
            # so the exported signature carries no gradient ops
            predictions = tf.nest.map_structure(tf.stop_gradient,
                                                tf.vectorized_map(fn=self.output_func,
                                                                  elems=predictions.values()))
        return predictions

    @property
    def modified_signature(self) -> ConcreteFunction:
        """
        Get a signature from custom inference function

        Returns
        --------
        out : list of tf.TensorSpec
            modified signatures of the network
        """
        new_signature = self.predict_batch.get_concrete_function(batch=self.input_placeholder)
        return new_signature

    def save_modified(self, filepath: str) -> None:
        """
        Save the modified network to given path

        First the method tries to save with newer model.save method. If the AttributeError is
        raised, the model will be saved with tf.saved_model. This behavior is usable when
        modifying older models saved with tf<2.5 or using tensorrt conversion

        Parameters
        ----------
        filepath : str
            path to save the new model to
        """
        # Ensure the target directory exists before any save attempt
        os.makedirs(filepath, exist_ok=True)
        try:
            self.model.save(filepath=filepath, signatures=self.modified_signature)
        except AttributeError:
            # Fallback path for models loaded from older formats that lack model.save
            tf.saved_model.save(obj=self.model, export_dir=filepath,
                                signatures=self.modified_signature)  # for models tf<=2.5
        logger.info(f"Successfully saved the network to {filepath}")

input_placeholder property

A default placeholder for signature inputs in case there's no input function specified

modified_signature: ConcreteFunction property

Get a signature from custom inference function

Returns:

Name Type Description
out list of tf.TensorSpec

modified signatures of the network

predict_batch(batch)

Wrap new prediction function to accept only one positional argument

Parameters:

Name Type Description Default
batch tf.Tensor

a batch of inputs

required

Returns:

Name Type Description
out tf.Tensor

predictions on received inputs

Source code in conftrainer/modifications/modifier.py
@tf.function
def predict_batch(self, batch: tf.Tensor) -> tf.Tensor:
    """
    Wrap new prediction function to accept only one positional argument

    Parameters
    ----------
    batch : tf.Tensor
        a batch of inputs

    Returns
    -------
    out : tf.Tensor
        predictions on received inputs
    """
    # Preprocess each element of the batch individually; fn_output_signature pins
    # the element dtype to the dtype the underlying keras model expects
    batch = tf.map_fn(fn=self.preprocess,
                      elems=batch,
                      fn_output_signature=self.model.input.dtype)

    # Run the original serving signature on the preprocessed batch
    predictions = self.signature(batch)
    if self.output_func:
        # Post-process each prediction; stop_gradient is mapped over the structure
        # so the exported signature carries no gradient ops.
        # NOTE(review): the .values() call implies predictions is dict-like --
        # tf.vectorized_map must accept that view as elems; confirm
        predictions = tf.nest.map_structure(tf.stop_gradient,
                                            tf.vectorized_map(fn=self.output_func,
                                                              elems=predictions.values()))
    return predictions

preprocess(inp)

Apply resizing, preprocessing and input function on given inputs

Parameters:

Name Type Description Default
inp tf.Tensor

A single input. Must be of input type of self.input_func if any, otherwise must be an image type

required

Returns:

Name Type Description
out tf.Tensor

resized and preprocessed tensor

Source code in conftrainer/modifications/modifier.py
@tf.function
def preprocess(self, inp: tf.Tensor) -> tf.Tensor:
    """
    Apply resizing, preprocessing and input function on given inputs

    Parameters
    ----------
    inp : tf.Tensor
        A single input. Must be of input type of self.input_func if any, otherwise must
        be an image type

    Returns
    -------
    out : tf.Tensor
        resized and preprocessed tensor
    """
    # Optional user-supplied conversion (e.g. decoding) before any image ops
    if self.input_func is not None:
        inp = self.input_func(inp)
    # Only rescale when non-default rescaling arguments were configured
    if self.rescaling_args != RescaleArgs():
        inp = rescale(inp, **self.rescaling_args.dict())
    # Resize to the spatial (height, width) part of the parsed input shape
    inp = tf.image.resize(images=inp, size=self.input_shape[:2])
    return inp

save_modified(filepath)

Save the modified network to given path

First the method tries to save with newer model.save method. If the AttributeError is raised, the model will be saved with tf.saved_model. This behavior is usable when modifying older models saved with tf<2.5 or using tensorrt conversion

Parameters:

Name Type Description Default
filepath str

path to save the new model to

required
Source code in conftrainer/modifications/modifier.py
def save_modified(self, filepath: str) -> None:
    """
    Save the modified network to given path

    First the method tries to save with newer model.save method. If the AttributeError is
    raised, the model will be saved with tf.saved_model. This behavior is usable when
    modifying older models saved with tf<2.5 or using tensorrt conversion

    Parameters
    ----------
    filepath : str
        path to save the new model to
    """
    os.makedirs(filepath, exist_ok=True)
    try:
        self.model.save(filepath=filepath, signatures=self.modified_signature)
    except AttributeError:
        tf.saved_model.save(obj=self.model, export_dir=filepath,
                            signatures=self.modified_signature)  # for models tf<=2.5
    logger.info(f"Successfully saved the network to {filepath}")

modify_multiple_networks(all_configs, serve_config_save_path)

Modify one or more networks with the given configurations

Parameters:

Name Type Description Default
all_configs List[NetModifyConfig]

configuration for modifying the networks

required
serve_config_save_path str

path to save generated serving config file

required
Source code in conftrainer/modifications/modifier.py
def modify_multiple_networks(all_configs: List[NetModifyConfig], serve_config_save_path: str) -> None:
    """
    Modify one or more networks with the given configurations

    Parameters
    ----------
    all_configs : List[NetModifyConfig]
        configuration for modifying the networks
    serve_config_save_path : str
        path to save generated serving config file
    """
    params_for_serving = []
    for net_cfg in all_configs:
        # Modify every configured version of this network, remembering which
        # versions were processed so they end up in the serving config
        modified_versions = []
        for version_cfg in net_cfg.versions:
            modify_single_net(model_config=version_cfg)
            modified_versions.append(version_cfg.version)
        params_for_serving.append(
            (net_cfg.name, net_cfg.save_base_path, modified_versions))

    # Emit a serving config covering all modified networks and versions
    generate_serving_config(param_list=params_for_serving,
                            config_save_path=serve_config_save_path)

modify_single_net(model_config)

Modify and save a single network with given configuration

Parameters:

Name Type Description Default
model_config VersionModifyConfig

configuration for modifying the network

required
Source code in conftrainer/modifications/modifier.py
def modify_single_net(model_config: VersionModifyConfig) -> None:
    """
    Modify and save a single network with given configuration

    Parameters
    ----------
    model_config : VersionModifyConfig
        configuration for modifying the network
    """
    logger.info(f"Loading the network from {model_config.load_path} to transform")
    # Guard clause: skip when the destination already holds files and overriding
    # is disabled (an existing-but-empty directory is still written to)
    if (os.path.exists(model_config.save_path)
            and not model_config.override
            and os.listdir(model_config.save_path)):
        logger.warning(f"{model_config.save_path} not empty. Skipping...")
        return
    modifier = NetworkModifier(config=model_config)
    modifier.save_modified(filepath=model_config.save_path)
    # Persist the modification config next to the exported model for traceability
    save_yaml(obj=model_config.dict(),
              filepath=os.path.join(model_config.save_path, "modification_config.yaml"))
    # Remove unused networks to release memory
    del modifier
    tf.keras.backend.clear_session()

options: docstring_style: numpy members_order: source