Skip to content

TensorFlow 2

model_navigator.api.config.TensorFlowConfig dataclass

Bases: CustomConfigForFormat

TensorFlow custom config used for SavedModel export.

Parameters:

Name Type Description Default
jit_compile Tuple[Optional[bool], ...]

Enable or Disable jit_compile flag for tf.function wrapper for Jax infer function.

(None)
enable_xla Tuple[Optional[bool], ...]

Enable or Disable enable_xla flag for jax2tf converter.

(None)

format: Format property

Format represented by CustomConfig.

Returns:

Type Description
Format

TensorFlowConfig format

defaults()

Update parameters to defaults.

Source code in model_navigator/api/config.py
def defaults(self) -> None:
    """Reset jit_compile and enable_xla to their default values."""
    # Both fields are tuples of candidate values; the default is a single None.
    self.jit_compile, self.enable_xla = (None,), (None,)

name() classmethod

Name of the config.

Source code in model_navigator/api/config.py
@classmethod
def name(cls) -> str:
    """Return the identifier under which this config is registered."""
    config_name = "TensorFlow"
    return config_name

model_navigator.api.config.TensorFlowTensorRTConfig dataclass

Bases: CustomConfigForFormat

TensorFlow TensorRT custom config used for TensorRT SavedModel export.

Parameters:

Name Type Description Default
precision Union[Union[str, TensorRTPrecision], Tuple[Union[str, TensorRTPrecision], ...]]

TensorRT precision.

DEFAULT_TENSORRT_PRECISION
max_workspace_size Optional[int]

Max workspace size used by converter.

DEFAULT_MAX_WORKSPACE_SIZE
minimum_segment_size int

Min size of subgraph.

DEFAULT_MIN_SEGMENT_SIZE
trt_profile Optional[TensorRTProfile]

TensorRT profile.

None

format: Format property

Format represented by CustomConfig.

Returns:

Type Description
Format

TensorFlowTensorRTConfig format

__post_init__()

Parse dataclass enums.

Source code in model_navigator/api/config.py
def __post_init__(self) -> None:
    """Normalize the precision field into a tuple of TensorRTPrecision enums."""
    # A single precision value is accepted; wrap it so iteration is uniform.
    if isinstance(self.precision, (list, tuple)):
        raw_precisions = self.precision
    else:
        raw_precisions = (self.precision,)
    self.precision = tuple(TensorRTPrecision(item) for item in raw_precisions)

defaults()

Update parameters to defaults.

Source code in model_navigator/api/config.py
def defaults(self) -> None:
    """Reset all configuration fields to their default values."""
    self.trt_profile = None
    self.minimum_segment_size = DEFAULT_MIN_SEGMENT_SIZE
    self.max_workspace_size = DEFAULT_MAX_WORKSPACE_SIZE
    # Re-coerce the default precisions into enum members, mirroring __post_init__.
    self.precision = tuple(TensorRTPrecision(item) for item in DEFAULT_TENSORRT_PRECISION)

from_dict(config_dict) classmethod

Instantiate TensorFlowTensorRTConfig from a dictionary.

Source code in model_navigator/api/config.py
@classmethod
def from_dict(cls, config_dict: Dict[str, Any]) -> "TensorFlowTensorRTConfig":
    """Instantiate TensorFlowTensorRTConfig from a dictionary.

    Args:
        config_dict: Mapping of field names to values. ``trt_profile`` may be
            provided either as a ``TensorRTProfile`` instance or in its
            serialized dict form.

    Returns:
        A new TensorFlowTensorRTConfig built from the given values.
    """
    if "trt_profile" in config_dict and not isinstance(config_dict["trt_profile"], TensorRTProfile):
        # Work on a shallow copy so the caller's dictionary is not mutated
        # as a side effect of deserializing the profile.
        config_dict = {
            **config_dict,
            "trt_profile": TensorRTProfile.from_dict(config_dict["trt_profile"]),
        }
    return cls(**config_dict)

name() classmethod

Name of the config.

Source code in model_navigator/api/config.py
@classmethod
def name(cls) -> str:
    """Return the identifier under which this config is registered."""
    config_name = "TensorFlowTensorRT"
    return config_name

model_navigator.api.tensorflow

TensorFlow optimize API.

optimize(model, dataloader, sample_count=DEFAULT_SAMPLE_COUNT, batching=True, input_names=None, output_names=None, target_formats=None, runners=None, profiler_config=None, workspace=None, verbose=False, debug=False, verify_func=None, custom_configs=None)

Function exports TensorFlow2 model to all supported formats.

Parameters:

Name Type Description Default
model tensorflow.keras.Model

TensorFlow2 model object

required
dataloader SizedDataLoader

Sized iterable with data that will be fed to the model

required
sample_count int

Limits how many samples will be used from dataloader

DEFAULT_SAMPLE_COUNT
batching Optional[bool]

Enable or disable batching on first (index 0) dimension of the model

True
input_names Optional[Tuple[str, ...]]

Model input names

None
output_names Optional[Tuple[str, ...]]

Model output names

None
target_formats Optional[Union[Union[str, Format], Tuple[Union[str, Format], ...]]]

Target model formats for optimize process

None
runners Optional[Union[Union[str, Type[NavigatorRunner]], Tuple[Union[str, Type[NavigatorRunner]], ...]]]

Use only runners provided as parameter

None
profiler_config Optional[ProfilerConfig]

Profiling config

None
workspace Optional[Path]

Workspace where packages will be extracted

None
verbose bool

Enable verbose logging

False
debug bool

Enable debug logging from commands

False
verify_func Optional[VerifyFunction]

Function for additional model verification

None
custom_configs Optional[Sequence[CustomConfig]]

Sequence of CustomConfigs used to control produced artifacts

None

Returns:

Type Description
Package

Package descriptor representing created package.

Source code in model_navigator/api/tensorflow.py
def optimize(
    model: tensorflow.keras.Model,
    dataloader: SizedDataLoader,
    sample_count: int = DEFAULT_SAMPLE_COUNT,
    batching: Optional[bool] = True,
    input_names: Optional[Tuple[str, ...]] = None,
    output_names: Optional[Tuple[str, ...]] = None,
    target_formats: Optional[Union[Union[str, Format], Tuple[Union[str, Format], ...]]] = None,
    runners: Optional[Union[Union[str, Type[NavigatorRunner]], Tuple[Union[str, Type[NavigatorRunner]], ...]]] = None,
    profiler_config: Optional[ProfilerConfig] = None,
    workspace: Optional[Path] = None,
    verbose: bool = False,
    debug: bool = False,
    verify_func: Optional[VerifyFunction] = None,
    custom_configs: Optional[Sequence[CustomConfig]] = None,
) -> Package:
    """Export a TensorFlow2 model to all supported formats.

    Args:
        model: TensorFlow2 model object
        dataloader: Sized iterable with data that will be fed to the model
        sample_count: Limits how many samples will be used from dataloader
        batching: Enable or disable batching on first (index 0) dimension of the model
        input_names: Model input names
        output_names: Model output names
        target_formats: Target model formats for optimize process
        runners: Use only runners provided as parameter
        profiler_config: Profiling config
        workspace: Workspace where packages will be extracted
        verbose: Enable verbose logging
        debug: Enable debug logging from commands
        verify_func: Function for additional model verification
        custom_configs: Sequence of CustomConfigs used to control produced artifacts

    Returns:
        Package descriptor representing created package.
    """
    # Fill in defaults for every optional piece of configuration up front.
    if workspace is None:
        workspace = get_default_workspace()
    if target_formats is None:
        target_formats = DEFAULT_TENSORFLOW_TARGET_FORMATS
    if runners is None:
        runners = default_runners()
    if profiler_config is None:
        profiler_config = ProfilerConfig()

    # Mapping-style samples are fed to the model by keyword; record the key order.
    first_sample = next(iter(dataloader))
    forward_kw_names = tuple(first_sample.keys()) if isinstance(first_sample, Mapping) else None

    formats = enums.parse(target_formats, Format)
    runner_names = enums.parse(runners, lambda runner: runner if isinstance(runner, str) else runner.name())

    # The SavedModel format is the base artifact every other conversion starts from,
    # so make sure it is always first in the target list.
    if Format.TENSORFLOW not in formats:
        formats = (Format.TENSORFLOW,) + formats

    config = CommonConfig(
        Framework.TENSORFLOW,
        model=model,
        dataloader=dataloader,
        target_formats=formats,
        workspace=workspace,
        sample_count=sample_count,
        _input_names=input_names,
        _output_names=output_names,
        batch_dim=0 if batching else None,
        runner_names=runner_names,
        profiler_config=profiler_config,
        forward_kw_names=forward_kw_names,
        verbose=verbose,
        debug=debug,
        verify_func=verify_func,
        custom_configs=map_custom_configs(custom_configs=custom_configs),
    )

    models_config = ModelConfigBuilder.generate_model_config(
        framework=Framework.TENSORFLOW,
        target_formats=formats,
        custom_configs=custom_configs,
    )

    # Assemble the pipeline: profiling is optional, verification always runs last.
    pipeline_builders = [
        preprocessing_builder,
        tensorflow_export_builder,
        tensorflow_conversion_builder,
        correctness_builder,
    ]
    if profiler_config.run_profiling:
        pipeline_builders.append(profiling_builder)
    pipeline_builders.append(verify_builder)

    return PipelineManager.run(
        pipeline_builders=pipeline_builders,
        config=config,
        models_config=models_config,
    )