Skip to content

Package

model_navigator.package.package

Package module - structure to snapshot optimization result.

Package

Package(status, workspace, model=None)

Class for storing pipeline execution status.

Initialize object.

Parameters:

  • status (Status) –

    A navigator execution status

  • workspace (Workspace) –

    Workspace for package files

  • model (Optional[object], default: None ) –

    An optional model

Source code in model_navigator/package/package.py
def __init__(self, status: Status, workspace: Workspace, model: Optional[object] = None):
    """Initialize object.

    Args:
        status: A navigator execution status
        workspace: Workspace for package files
        model: An optional model (in-memory source model, if available)
    """
    # Execution status and workspace form the package's primary state.
    self.status = status
    self.workspace = workspace
    # Source model is kept private; it may also be set later via `load_source_model`.
    self._model = model

config property

config

Generate configuration from package.

Returns:

  • CommonConfig

    The configuration object

framework property

framework

Framework for which package was created.

Returns:

  • Framework

    Framework object for package

model property

model

Return source model.

Returns:

  • Optional[object] –

    The source model object if one was provided or loaded, otherwise None.

_create_status_file

_create_status_file()

Create a status.yaml file for package.

Source code in model_navigator/package/package.py
def _create_status_file(self) -> None:
    """Write the serialized status to the status.yaml file inside the workspace."""
    status_path = self.workspace.path / self.status_filename
    serialized_status = self._status_serializable_dict()
    # Preserve field order from the serialized dict (sort_keys=False).
    with status_path.open("w") as status_file:
        yaml.safe_dump(serialized_status, status_file, sort_keys=False)

_delete_status_file

_delete_status_file()

Delete the status.yaml file from package.

Source code in model_navigator/package/package.py
def _delete_status_file(self):
    """Delete the status.yaml file from package.

    Uses `unlink(missing_ok=True)` (Python 3.8+) so the removal is race-free:
    the original `exists()`/`unlink()` pair could raise if the file vanished
    between the two calls; this form is a safe no-op when the file is absent.
    """
    path = self.workspace.path / self.status_filename
    path.unlink(missing_ok=True)

_get_custom_configs

_get_custom_configs(custom_configs)

Build custom configs from config data.

Parameters:

  • custom_configs (Dict[str, Union[Dict, CustomConfigForFormat]]) –

    Dictionary with custom configs data

Returns:

  • Dict

    Dictionary with mapped objects

Source code in model_navigator/package/package.py
def _get_custom_configs(self, custom_configs: Dict[str, Union[Dict, CustomConfigForFormat]]) -> Dict:
    """Build custom configs from config data.

    Plain dictionaries are deserialized into their matching custom config
    classes; objects that are already custom configs are passed through as-is.

    Args:
        custom_configs: Dictionary with custom configs data

    Returns:
        Dictionary mapping class names to custom config objects
    """
    custom_configs_mapped = {}
    for class_name, obj in custom_configs.items():
        if isinstance(obj, dict):
            # Resolve the concrete config class by name and rebuild the object.
            custom_config_class = CUSTOM_CONFIGS_MAPPING[class_name]
            obj = custom_config_class.from_dict(obj)  # pytype: disable=not-instantiable

        custom_configs_mapped[class_name] = obj

    return custom_configs_mapped

_get_runner

_get_runner(model_key, runner_name, device, return_type, inplace=False, runner_config=None)

Load runner.

Parameters:

  • model_key (str) –

    Unique key of the model.

  • runner_name (str) –

    Name of the runner.

  • return_type (TensorType) –

    Type of the runner output.

  • device (str) –

    Device on which the model has been executed

  • inplace (bool, default: False ) –

    Indicate if runner is in inplace mode.

  • runner_config (Optional[RunnerConfig], default: None ) –

    Runner configuration.

Returns:

  • NavigatorRunner

    NavigatorRunner object

Source code in model_navigator/package/package.py
def _get_runner(
    self,
    model_key: str,
    runner_name: str,
    device: str,
    return_type: TensorType,
    inplace: bool = False,
    runner_config: Optional[RunnerConfig] = None,
) -> NavigatorRunner:
    """Load runner.

    Args:
        model_key: Unique key of the model.
        runner_name: Name of the runner.
        device: Device on which the model has been executed
        return_type: Type of the runner output.
        inplace: Indicate if runner is in inplace mode.
        runner_config: Runner configuration.

    Raises:
        ModelNavigatorNotFoundError: When `model_key` does not match any model in the package status.

    Returns:
        NavigatorRunner object
    """
    try:
        model_config = self.status.models_status[model_key].model_config
    except KeyError:
        raise ModelNavigatorNotFoundError(f"Model {model_key} not found.") from None

    # Source formats run the in-memory model; serialized formats load from disk.
    if is_source_format(model_config.format):
        model = self._model
    else:
        model = self.workspace.path / model_config.path

    device_kind = get_device_kind_from_device_string(device)
    LOGGER.info(f"Creating model `{model_key}` on runner `{runner_name}` and device `{device}`")
    # TODO: implement better handling for redundant device argument in _get_runner and runner_config
    # A missing (None) runner_config simply contributes no extra kwargs.
    # NOTE(review): the previous `if runner_config is None: runner_config = {}` was dead code —
    # the truthiness check below already maps None (and {}) to an empty dict.
    runner_config_dict = runner_config.to_dict(parse=True) if runner_config else {}
    runner_config_dict["device"] = device

    return get_runner(runner_name, device_kind)(
        model=model,
        input_metadata=self.status.input_metadata,
        output_metadata=self.status.output_metadata,
        return_type=return_type,
        # device=device, # TODO: remove redundant device argument and use runner_config
        inplace=inplace,
        **runner_config_dict,
    )  # pytype: disable=not-instantiable

_status_serializable_dict

_status_serializable_dict()

Convert status to serializable dict.

Source code in model_navigator/package/package.py
def _status_serializable_dict(self) -> Dict:
    """Convert status to serializable dict."""
    # Strip fields that cannot (or should not) be serialized.
    filtered_config = DataObject.filter_data(
        data=self.status.config,
        filter_fields=[
            "model",
            "dataloader",
            "verify_func",
            "workspace",
        ],
    )
    parsed_config = DataObject.parse_data(filtered_config)

    # Shallow-copy so the in-memory status keeps its original config object.
    status_snapshot = copy.copy(self.status)
    status_snapshot.config = parsed_config
    return status_snapshot.to_dict(parse=True)

get_best_model_status

get_best_model_status(strategies=None, include_source=True)

Returns ModelStatus of best model for given strategy.

Parameters:

  • strategies (Optional[List[RuntimeSearchStrategy]], default: None ) –

    List of strategies for finding the best model. Strategies are selected in provided order. When first fails, next strategy from the list is used. When no strategies have been provided it defaults to [MaxThroughputAndMinLatencyStrategy, MinLatencyStrategy]

  • include_source (bool, default: True ) –

    Flag if Python based model has to be included in analysis

Returns:

  • ModelStatus

    ModelStatus of best model for given strategy or None.

Source code in model_navigator/package/package.py
def get_best_model_status(
    self,
    strategies: Optional[List[RuntimeSearchStrategy]] = None,
    include_source: bool = True,
) -> ModelStatus:
    """Return the ModelStatus of the best model selected by the given strategies.

    Args:
        strategies: Ordered list of strategies used to select the best model;
                    the first strategy that produces a result wins. When omitted,
                    defaults to [`MaxThroughputAndMinLatencyStrategy`, `MinLatencyStrategy`].
        include_source: Flag if Python based model has to be included in analysis

    Returns:
        ModelStatus of best model for given strategy or None.
    """
    best_runtime = self.get_best_runtime(strategies=strategies, include_source=include_source)
    return best_runtime.model_status

get_best_runtime

get_best_runtime(strategies=None, include_source=True, inplace=False)

Returns best runtime for given strategy.

Parameters:

  • strategies (Optional[List[RuntimeSearchStrategy]], default: None ) –

    List of strategies for finding the best model. Strategies are selected in provided order. When first fails, next strategy from the list is used. When no strategies have been provided it defaults to [MaxThroughputAndMinLatencyStrategy, MinLatencyStrategy]

  • include_source (bool, default: True ) –

    Flag if Python based model has to be included in analysis

  • inplace (bool, default: False ) –

    should only inplace supported runners be included in analysis

Returns:

  • Best runtime for given strategy.

Source code in model_navigator/package/package.py
def get_best_runtime(
    self,
    strategies: Optional[List[RuntimeSearchStrategy]] = None,
    include_source: bool = True,
    inplace: bool = False,
):
    """Select the best runtime by trying each strategy in order.

    Args:
        strategies: Ordered list of strategies used to select the best model;
                    the first strategy that produces a result wins. When omitted,
                    defaults to [`MaxThroughputAndMinLatencyStrategy`, `MinLatencyStrategy`].
        include_source: Flag if Python based model has to be included in analysis
        inplace: should only inplace supported runners be included in analysis

    Returns:
        Best runtime for given strategy.

    Raises:
        ModelNavigatorRuntimeAnalyzerError: When no matching results found.
    """
    selected_strategies = DEFAULT_RUNTIME_STRATEGIES if strategies is None else strategies

    # Restrict to serialized formats when the Python source model is excluded.
    formats = None if include_source else [fmt.value for fmt in SERIALIZED_FORMATS]

    # Restrict to inplace-capable runners when requested.
    runners = [name for name, runner in runner_registry.items() if runner.is_inplace] if inplace else None

    for strategy in selected_strategies:
        try:
            # First strategy that yields a result wins.
            return RuntimeAnalyzer.get_runtime(
                self.status.models_status,
                strategy=strategy,
                formats=formats,
                runners=runners,
            )
        except ModelNavigatorRuntimeAnalyzerError:
            LOGGER.debug(f"No model found with strategy: {strategy}")

    raise ModelNavigatorRuntimeAnalyzerError("No matching results found.")

get_model_path

get_model_path(model_key)

Return path of the model.

Parameters:

  • model_key (str) –

    Unique key of the model.

Raises:

  • ModelNavigatorNotFoundError

    When model not found.

Returns:

  • Path ( Path ) –

    model path

Source code in model_navigator/package/package.py
def get_model_path(self, model_key: str) -> pathlib.Path:
    """Return path of the model.

    Args:
        model_key: Unique key of the model.

    Raises:
        ModelNavigatorNotFoundError: When model not found.

    Returns:
        model path
    """
    model_status = self.status.models_status.get(model_key)
    if model_status is None:
        raise ModelNavigatorNotFoundError(f"Model {model_key} not found.")
    return self.workspace.path / model_status.model_config.path

get_runner

get_runner(strategies=None, include_source=True, return_type=NUMPY, device='cuda', inplace=False)

Get the runner according to the strategy.

Parameters:

  • strategies (Optional[List[RuntimeSearchStrategy]], default: None ) –

    List of strategies for finding the best model. Strategies are selected in provided order. When first fails, next strategy from the list is used. When no strategies have been provided it defaults to [MaxThroughputAndMinLatencyStrategy, MinLatencyStrategy]

  • include_source (bool, default: True ) –

    Flag if Python based model has to be included in analysis

  • return_type (TensorType, default: NUMPY ) –

    The type of the output tensor. Defaults to TensorType.NUMPY. If the return_type supports CUDA tensors (e.g. TensorType.TORCH) and the input tensors are on CUDA, there will be no additional data transfer between CPU and GPU.

  • device (str, default: 'cuda' ) –

    Device where model is going to be executed. Defaults to "cuda".

  • inplace (bool, default: False ) –

    Indicate that runner is in inplace mode.

Returns:

  • NavigatorRunner

    The optimal runner for the optimized model.

Source code in model_navigator/package/package.py
def get_runner(
    self,
    strategies: Optional[List[RuntimeSearchStrategy]] = None,
    include_source: bool = True,
    return_type: TensorType = TensorType.NUMPY,
    device: str = "cuda",
    inplace: bool = False,
) -> NavigatorRunner:
    """Build the optimal runner for the best runtime found in the package.

    Args:
        strategies: Ordered list of strategies used to select the best model;
                    the first strategy that produces a result wins. When omitted,
                    defaults to [`MaxThroughputAndMinLatencyStrategy`, `MinLatencyStrategy`].
        include_source: Flag if Python based model has to be included in analysis
        return_type: The type of the output tensor. Defaults to `TensorType.NUMPY`.
            If the return_type supports CUDA tensors (e.g. TensorType.TORCH) and the input tensors are on CUDA,
            there will be no additional data transfer between CPU and GPU.
        device: Device where model is going to be executed. Defaults to `"cuda"`.
        inplace: Indicate that runner is in inplace mode.

    Returns:
        The optimal runner for the optimized model.
    """
    best_runtime = self.get_best_runtime(strategies=strategies, include_source=include_source, inplace=inplace)
    model_config = best_runtime.model_status.model_config
    runner_status = best_runtime.runner_status

    # Optional per-model runner configuration; not every model config carries one.
    runner_config = getattr(model_config, "runner_config", None)  # pytype: disable=attribute-error

    # The two guards below are disjoint: a model is either source-based or serialized.
    model_is_source = is_source_format(model_config.format)
    if model_is_source and self._model is None:
        raise ModelNavigatorMissingSourceModelError(
            "The best runner uses the source model but it is not available in the loaded package. "
            "Please load the source model with `package.load_source_model(model)` "
            "or exclude source model from optimal runner search "
            "with `package.get_runner(include_source=False)`."
        )
    if not model_is_source and not (self.workspace.path / model_config.path).exists():
        raise ModelNavigatorNotFoundError(
            f"The best runner expects {model_config.format.value!r} "
            "model but it is not available in the loaded package."
        )

    return self._get_runner(
        model_config.key,
        runner_status.runner_name,
        return_type=return_type,
        device=device,
        inplace=inplace,
        runner_config=runner_config,
    )

is_empty

is_empty()

Validate if package is empty - no models were produced.

Returns:

  • bool

    True if empty package, False otherwise.

Source code in model_navigator/package/package.py
def is_empty(self) -> bool:
    """Validate if package is empty - no models were produced.

    A package counts as non-empty when at least one serialized (non-source)
    model has a runner that passed correctness, did not fail performance,
    and whose model directory exists in the workspace.

    Returns:
        True if empty package, False otherwise.
    """

    def _has_usable_runner(model_status) -> bool:
        # Only serialized models count as produced artifacts.
        if is_source_format(model_status.model_config.format):
            return False
        model_dir = self.workspace.path / model_status.model_config.path.parent
        return any(
            runner_status.status.get(Correctness.__name__) == CommandStatus.OK
            and runner_status.status.get(Performance.__name__) != CommandStatus.FAIL
            and model_dir.exists()
            for runner_status in model_status.runners_status.values()
        )

    return not any(_has_usable_runner(ms) for ms in self.status.models_status.values())

load_source_model

load_source_model(model)

Load model defined in Python code.

Parameters:

  • model (object) –

    A model object

Source code in model_navigator/package/package.py
def load_source_model(self, model: object) -> None:
    """Load model defined in Python code.

    Args:
        model: A model object
    """
    replacing_existing = self._model is not None
    if replacing_existing:
        LOGGER.warning("Overriding existing source model.")
    self._model = model

save_status_file

save_status_file()

Save the status.yaml.

Source code in model_navigator/package/package.py
def save_status_file(self) -> None:
    """Save the status.yaml.

    Replaces any existing status file: the old file is removed first, then a
    fresh one is created from the current in-memory status.
    """
    self._delete_status_file()
    self._create_status_file()