Skip to content

TensorFlow 2

model_navigator.tensorflow

TensorFlow optimize API.

optimize

optimize(model, dataloader, sample_count=DEFAULT_SAMPLE_COUNT, batching=True, input_names=None, output_names=None, target_formats=None, target_device=DeviceKind.CUDA, runners=None, optimization_profile=None, workspace=None, verbose=False, debug=False, verify_func=None, custom_configs=None)

Entrypoint for TensorFlow2 optimize.

Perform export, conversion, correctness testing, profiling and model verification.

Parameters:

  • model (Model) –

    TensorFlow2 model object

  • dataloader (SizedDataLoader) –

    Sized iterable with data that will be fed to the model

  • sample_count (int, default: DEFAULT_SAMPLE_COUNT ) –

    Limits how many samples will be used from dataloader

  • batching (Optional[bool], default: True ) –

    Enable or disable batching on first (index 0) dimension of the model

  • input_names (Optional[Tuple[str, ...]], default: None ) –

    Model input names

  • output_names (Optional[Tuple[str, ...]], default: None ) –

    Model output names

  • target_formats (Optional[Tuple[Union[str, Format], ...]], default: None ) –

    Target model formats for optimize process

  • target_device (Optional[DeviceKind], default: CUDA ) –

    Target device for optimize process, default is CUDA

  • runners (Optional[Tuple[Union[str, Type[NavigatorRunner]], ...]], default: None ) –

    Use only runners provided as parameter

  • optimization_profile (Optional[OptimizationProfile], default: None ) –

    Optimization profile for conversion and profiling

  • workspace (Optional[Path], default: None ) –

    Workspace where packages will be extracted

  • verbose (bool, default: False ) –

    Enable verbose logging

  • debug (bool, default: False ) –

    Enable debug logging from commands

  • verify_func (Optional[VerifyFunction], default: None ) –

    Function for additional model verification

  • custom_configs (Optional[Sequence[CustomConfig]], default: None ) –

    Sequence of CustomConfigs used to control produced artifacts

Returns:

  • Package

    Package descriptor representing created package.

Source code in model_navigator/tensorflow/__init__.py
def optimize(
    model: tensorflow.keras.Model,
    dataloader: SizedDataLoader,
    sample_count: int = DEFAULT_SAMPLE_COUNT,
    batching: Optional[bool] = True,
    input_names: Optional[Tuple[str, ...]] = None,
    output_names: Optional[Tuple[str, ...]] = None,
    target_formats: Optional[Tuple[Union[str, Format], ...]] = None,
    target_device: Optional[DeviceKind] = DeviceKind.CUDA,
    runners: Optional[Tuple[Union[str, Type[NavigatorRunner]], ...]] = None,
    optimization_profile: Optional[OptimizationProfile] = None,
    workspace: Optional[pathlib.Path] = None,
    verbose: bool = False,
    debug: bool = False,
    verify_func: Optional[VerifyFunction] = None,
    custom_configs: Optional[Sequence[CustomConfig]] = None,
) -> Package:
    """Entrypoint for TensorFlow2 optimize.

    Perform export, conversion, correctness testing, profiling and model verification.

    Args:
        model: TensorFlow2 model object
        dataloader: Sized iterable with data that will be fed to the model
        sample_count: Limits how many samples will be used from dataloader
        batching: Enable or disable batching on first (index 0) dimension of the model
        input_names: Model input names
        output_names: Model output names
        target_formats: Target model formats for optimize process
        target_device: Target device for optimize process, default is CUDA
        runners: Use only runners provided as parameter
        optimization_profile: Optimization profile for conversion and profiling
        workspace: Workspace where packages will be extracted
        verbose: Enable verbose logging
        debug: Enable debug logging from commands
        verify_func: Function for additional model verification
        custom_configs: Sequence of CustomConfigs used to control produced artifacts

    Returns:
        Package descriptor representing created package.
    """
    # CPU-targeted optimization is unsupported while TensorFlow can still see a GPU;
    # fail fast with instructions instead of producing a misleading result.
    # The GPU query is only performed when the CPU target is requested (same
    # short-circuit behavior as the original combined condition).
    if target_device == DeviceKind.CPU:
        visible_devices = tensorflow.config.get_visible_devices()
        if any(dev.device_type == "GPU" for dev in visible_devices):
            raise ModelNavigatorConfigurationError(
                "\n"
                "    'target_device == nav.DeviceKind.CPU' is not supported for TensorFlow2 when GPU is available.\n"
                "    To optimize model for CPU, disable GPU with: "
                "'tf.config.set_visible_devices([], 'GPU')' directly after importing TensorFlow.\n"
            )

    # Fall back to framework defaults when the caller did not constrain formats.
    target_formats = (
        DEFAULT_TENSORFLOW_TARGET_FORMATS if target_formats is None else target_formats
    )

    # Either use the default runner set for the device, or restrict the
    # caller-provided runners to those compatible with the device.
    runners = (
        default_runners(device_kind=target_device)
        if runners is None
        else filter_runners(runners, device_kind=target_device)
    )

    formats_as_enums = enums.parse(target_formats, Format)
    runner_names = enums.parse(
        runners, lambda item: item if isinstance(item, str) else item.name()
    )

    if optimization_profile is None:
        optimization_profile = OptimizationProfile()

    # The source TensorFlow format must always be part of the pipeline.
    if Format.TENSORFLOW not in formats_as_enums:
        formats_as_enums = (Format.TENSORFLOW, *formats_as_enums)

    config = CommonConfig(
        Framework.TENSORFLOW,
        model=model,
        dataloader=dataloader,
        target_formats=formats_as_enums,
        target_device=target_device,
        sample_count=sample_count,
        _input_names=input_names,
        _output_names=output_names,
        batch_dim=0 if batching else None,
        runner_names=runner_names,
        optimization_profile=optimization_profile,
        verbose=verbose,
        debug=debug,
        verify_func=verify_func,
        custom_configs=map_custom_configs(custom_configs=custom_configs),
    )

    models_config = ModelConfigBuilder.generate_model_config(
        framework=Framework.TENSORFLOW,
        target_formats=formats_as_enums,
        custom_configs=custom_configs,
    )

    # Standard TensorFlow2 pipeline stages; verification runs only when a
    # verify function was supplied by the caller.
    pipeline_builders = [
        preprocessing_builder,
        tensorflow_export_builder,
        find_device_max_batch_size_builder,
        tensorflow_conversion_builder,
        tensorflow_tensorrt_conversion_builder,
        tensorrt_conversion_builder,
        correctness_builder,
        performance_builder,
    ]
    if verify_func:
        pipeline_builders.append(verify_builder)

    return optimize_pipeline(
        model=model,
        workspace=workspace,
        builders=pipeline_builders,
        config=config,
        models_config=models_config,
    )