Skip to content

Package

model_navigator.package.package

Package module - structure to snapshot optimization result.

Package

Package(status, workspace, model=None)

Class for storing pipeline execution status.

Initialize object.

Parameters:

  • status (Status) –

    A navigator execution status

  • workspace (Workspace) –

    Workspace for package files

  • model (Optional[object]) –

    An optional model

Source code in model_navigator/package/package.py
def __init__(self, status: Status, workspace: Workspace, model: Optional[object] = None):
    """Create a package snapshot around a pipeline execution status.

    Args:
        status: A navigator execution status
        workspace: Workspace for package files
        model: An optional model
    """
    # Public state captured from the optimization run.
    self.status = status
    self.workspace = workspace
    # The source model is optional here; it can also be attached later
    # through `load_source_model`.
    self._model = model
    # Not known at construction time; starts unset.
    self._forward_kw_names = None

config property

config: CommonConfig

Generate configuration from package.

Returns:

  • CommonConfig

    The configuration object

framework property

framework: Framework

Framework for which package was created.

Returns:

  • Framework

    Framework object for package

model property

model: object

Return source model.

Returns:

  • object –

    The source model object.

get_best_model_status

get_best_model_status(strategy=None, include_source=True)

Returns ModelStatus of best model for given strategy.

Parameters:

  • strategy (Optional[RuntimeSearchStrategy]) –

    Strategy for finding the best runtime. Defaults to MaxThroughputAndMinLatencyStrategy.

  • include_source (bool) –

    Flag if Python based model has to be included in analysis

Returns:

  • ModelStatus

    ModelStatus of best model for given strategy or None.

Source code in model_navigator/package/package.py
def get_best_model_status(
    self,
    strategy: Optional[RuntimeSearchStrategy] = None,
    include_source: bool = True,
) -> ModelStatus:
    """Look up the ModelStatus of the best model under the given strategy.

    Args:
        strategy: Strategy for finding the best runtime. Defaults to `MaxThroughputAndMinLatencyStrategy`.
        include_source: Flag if Python based model has to be included in analysis

    Returns:
        ModelStatus of best model for given strategy or None.
    """
    # Delegate runtime selection, then surface only the model-level status.
    best_runtime = self._get_best_runtime(strategy=strategy, include_source=include_source)
    return best_runtime.model_status

get_model_path

get_model_path(model_key)

Return path of the model.

Parameters:

  • model_key (str) –

    Unique key of the model.

Raises:

  • ModelNavigatorNotFoundError

    When model not found.

Returns:

  • Path –

    Path of the model file.

Source code in model_navigator/package/package.py
def get_model_path(self, model_key: str) -> pathlib.Path:
    """Return path of the model.

    Args:
        model_key: Unique key of the model.

    Raises:
        ModelNavigatorNotFoundError: When model not found.

    Returns:
        Path: model path
    """
    try:
        model_config = self.status.models_status[model_key].model_config
    except KeyError as e:
        # Chain the original KeyError so the failed lookup stays visible
        # in tracebacks instead of being reported as "another exception
        # occurred during handling".
        raise ModelNavigatorNotFoundError(f"Model {model_key} not found.") from e
    return self.workspace.path / model_config.path

get_runner

get_runner(strategy=None, include_source=True, return_type=TensorType.NUMPY)

Get the runner according to the strategy.

Parameters:

  • strategy (Optional[RuntimeSearchStrategy]) –

    Strategy for finding the best runtime. Defaults to MaxThroughputAndMinLatencyStrategy.

  • include_source (bool) –

    Flag if Python based model has to be included in analysis

  • return_type (TensorType) –

    The type of the output tensor. Defaults to TensorType.NUMPY. If the return_type supports CUDA tensors (e.g. TensorType.TORCH) and the input tensors are on CUDA, there will be no additional data transfer between CPU and GPU.

Returns:

  • NavigatorRunner

    The optimal runner for the optimized model.

Source code in model_navigator/package/package.py
def get_runner(
    self,
    strategy: Optional[RuntimeSearchStrategy] = None,
    include_source: bool = True,
    return_type: TensorType = TensorType.NUMPY,
) -> NavigatorRunner:
    """Select and construct the optimal runner for the optimized model.

    Args:
        strategy: Strategy for finding the best runtime. Defaults to `MaxThroughputAndMinLatencyStrategy`.
        include_source: Flag if Python based model has to be included in analysis
        return_type: The type of the output tensor. Defaults to `TensorType.NUMPY`.
            If the return_type supports CUDA tensors (e.g. TensorType.TORCH) and the input tensors are on CUDA,
            there will be no additional data transfer between CPU and GPU.

    Returns:
        The optimal runner for the optimized model.
    """
    best = self._get_best_runtime(strategy=strategy, include_source=include_source)
    model_config = best.model_status.model_config
    runner_status = best.runner_status

    source_based = is_source_format(model_config.format)

    # A serialized (non-source) model must be present on disk in the workspace.
    if not source_based and not (self.workspace.path / model_config.path).exists():
        raise ModelNavigatorNotFoundError(
            f"The best runner expects {model_config.format.value!r} "
            "model but it is not available in the loaded package."
        )

    # A source-based runner needs the in-memory model to have been loaded.
    if source_based and self._model is None:
        raise ModelNavigatorMissingSourceModelError(
            "The best runner uses the source model but it is not available in the loaded package. "
            "Please load the source model with `package.load_source_model(model)` "
            "or exclude source model from optimal runner search "
            "with `package.get_runner(include_source=False)`."
        )

    return self._get_runner(model_config.key, runner_status.runner_name, return_type=return_type)

is_empty

is_empty()

Validate if package is empty - no models were produced.

Returns:

  • bool

    True if empty package, False otherwise.

Source code in model_navigator/package/package.py
def is_empty(self) -> bool:
    """Validate if package is empty - no models were produced.

    Returns:
        True if empty package, False otherwise.
    """
    for model_status in self.status.models_status.values():
        model_config = model_status.model_config
        # Source-format entries do not count as produced models.
        if is_source_format(model_config.format):
            continue
        # The model's artifact directory must actually exist in the workspace;
        # this check does not depend on the runner, so hoist it out of the loop.
        artifacts_present = (self.workspace.path / model_config.path.parent).exists()
        for runner_status in model_status.runners_status.values():
            correctness_ok = runner_status.status.get(Correctness.__name__) == CommandStatus.OK
            performance_not_failed = runner_status.status.get(Performance.__name__) != CommandStatus.FAIL
            if correctness_ok and performance_not_failed and artifacts_present:
                # Found at least one usable produced model.
                return False
    return True

load_source_model

load_source_model(model)

Load model defined in Python code.

Parameters:

  • model (object) –

    A model object

Source code in model_navigator/package/package.py
def load_source_model(self, model: object) -> None:
    """Attach a Python-defined source model to the package.

    Args:
        model: A model object
    """
    # Replacing an already-loaded source model is allowed, but worth flagging.
    if self._model is not None:
        LOGGER.warning("Overriding existing source model.")
    self._model = model

save_status_file

save_status_file()

Save the status.yaml.

Source code in model_navigator/package/package.py
def save_status_file(self) -> None:
    """Save the status.yaml."""
    # Drop any stale status file first, then write the current one.
    self._delete_status_file()
    self._create_status_file()