compressai_trainer.utils#

Miscellaneous.

Aim#

compressai_trainer.utils.aim.query.best_metric_index(run: aim.sdk.run.Run, min_metric: str = 'loss', loader: str = 'valid', scope: str = 'epoch') Optional[int][source]#

Returns step index at which a given metric is minimized.
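Example usage (a minimal sketch; the repo path and run hash are hypothetical):

import aim

from compressai_trainer.utils.aim.query import best_metric_index

repo = aim.Repo("/path/to/aim/repo")
run = repo.get_run("abcdef1234")  # hypothetical run hash

# Step index (epoch) at which the validation loss is lowest.
index = best_metric_index(run, min_metric="loss", loader="valid")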

compressai_trainer.utils.aim.query.get_runs_dataframe(run_hashes: list[str], repo: aim.sdk.repo.Repo, *, min_metric: str = 'loss', metrics: list[str] = ['bpp', 'psnr', 'ms-ssim'], hparams: list[str] = [], epoch: Union[int, Literal['best'], Literal['last']] = 'best') pandas.core.frame.DataFrame[source]#

Returns a dataframe of best model metrics for the given runs.

For each run, accumulates metric values (from the infer loader) at a particular epoch into a dataframe. If epoch == "best", the epoch that minimizes min_metric is chosen.
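Example usage (a sketch; the repo path, query, and hparam name are hypothetical), with run hashes obtained via run_hashes_by_query (documented below):

import aim

from compressai_trainer.utils.aim.query import get_runs_dataframe, run_hashes_by_query

repo = aim.Repo("/path/to/aim/repo")
run_hashes = run_hashes_by_query(repo, 'run.model.name == "bmshj2018-factorized"')

df = get_runs_dataframe(
    run_hashes,
    repo,
    metrics=["bpp", "psnr"],
    hparams=["criterion.lmbda"],
    epoch="best",
)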

compressai_trainer.utils.aim.query.metrics_at_index(run: aim.sdk.run.Run, metrics: list[str], hparams: list[str], index: int, loader: str = 'infer', scope: str = 'epoch') dict[str, Any][source]#

Returns metrics logged at a particular step index.
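This pairs naturally with best_metric_index, e.g. to read the infer metrics at the best validation epoch (a sketch; the repo path, run hash, and hparam name are hypothetical):

import aim

from compressai_trainer.utils.aim.query import best_metric_index, metrics_at_index

repo = aim.Repo("/path/to/aim/repo")
run = repo.get_run("abcdef1234")  # hypothetical run hash

index = best_metric_index(run, min_metric="loss", loader="valid")
results = metrics_at_index(
    run,
    metrics=["bpp", "psnr"],
    hparams=["criterion.lmbda"],
    index=index,
)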

compressai_trainer.utils.aim.query.run_hashes_by_query(repo: aim.sdk.repo.Repo, query: str) list[str][source]#

Returns hashes of runs that match a given query.

Catalyst#

class compressai_trainer.utils.catalyst.loggers.AimLogger(*, experiment: Optional[str] = None, run_hash: Optional[str] = None, exclude: Optional[List[str]] = None, log_batch_metrics: bool = False, log_epoch_metrics: bool = True, repo: Optional[Union[str, aim.sdk.repo.Repo]] = None, run: Optional[aim.sdk.run.Run] = None, **kwargs)[source]#

Aim logger for parameters, metrics, images and other artifacts.

Aim documentation: https://aimstack.readthedocs.io/en/latest/.

Parameters
  • experiment – Name of the experiment in Aim to log to.

  • run_hash – Run hash.

  • exclude – Names of keys to exclude from logging.

  • log_batch_metrics – Boolean flag to log batch metrics (default: SETTINGS.log_batch_metrics or False).

  • log_epoch_metrics – Boolean flag to log epoch metrics (default: SETTINGS.log_epoch_metrics or True).

  • repo – Aim repo object.

  • run – Aim run object. If specified, experiment, run_hash, and repo are ignored.

Python API examples:

from catalyst import dl

from compressai_trainer.utils.catalyst.loggers import AimLogger

runner = dl.SupervisedRunner()
runner.train(
    ...,
    loggers={"aim": AimLogger(experiment="test_exp")}
)

from catalyst import dl

from compressai_trainer.utils.catalyst.loggers import AimLogger

class CustomRunner(dl.IRunner):
    # ...

    def get_loggers(self):
        return {
            "console": dl.ConsoleLogger(),
            "aim": AimLogger(experiment="test_exp")
        }

    # ...

runner = CustomRunner().run()

close_log() None[source]#

End an active Aim run.

exclude: List[str]#
log_artifact(tag: str, runner: IRunner, artifact: object = None, path_to_artifact: Optional[str] = None, scope: Optional[str] = None, kind: str = 'text', context: Optional[Dict] = None, track_kwargs: Optional[Dict] = None, **kwargs) None[source]#

Logs a local file or directory as an artifact to the logger.

log_distribution(tag: str, unused: Any, runner: IRunner, scope: Optional[str] = None, context: Optional[Dict] = None, track_kwargs: Optional[Dict] = None, **kwargs) None[source]#

Logs distribution to Aim for current scope on current step.

log_figure(tag: str, fig: Any, runner: IRunner, scope: Optional[str] = None, context: Optional[Dict] = None, track_kwargs: Optional[Dict] = None, **kwargs) None[source]#

Logs figure to Aim for current scope on current step.

log_hparams(hparams: Dict, runner: Optional[IRunner] = None) None[source]#

Logs parameters for current scope.

Parameters
  • hparams – Parameters to log.

  • runner – Experiment runner.

log_image(tag: str, image, runner: IRunner, scope: Optional[str] = None, context: Optional[Dict] = None, track_kwargs: Optional[Dict] = None, **kwargs) None[source]#

Logs image to Aim for current scope on current step.

log_metrics(metrics: Dict[str, float], scope: str, runner: IRunner, context: Optional[Dict] = None, track_kwargs: Optional[Dict] = None) None[source]#

Logs batch and epoch metrics to Aim.

property logger#

Internal logger/experiment/etc. from the monitoring system.

run: aim.sdk.run.Run#
class compressai_trainer.utils.catalyst.loggers.AllSuperlogger(enabled_image_loggers: list[str] = ['aim'])[source]#
class compressai_trainer.utils.catalyst.loggers.DistributionSuperlogger[source]#
log_distribution(*args, **kwargs) None[source]#

Logs distribution to available loggers.

loggers: dict[str, catalyst.core.logger.ILogger]#
class compressai_trainer.utils.catalyst.loggers.FigureSuperlogger[source]#
log_figure(*args, **kwargs) None[source]#

Logs figure to available loggers.

loggers: dict[str, catalyst.core.logger.ILogger]#
class compressai_trainer.utils.catalyst.loggers.ImageSuperlogger(enabled_image_loggers: list[str] = ['aim'])[source]#
log_image(*args, **kwargs) None[source]#

Logs image to available loggers.

loggers: dict[str, catalyst.core.logger.ILogger]#

CompressAI#

compressai_trainer.utils.compressai.results.compressai_results_dataframe(filename: str, base_path: Optional[str] = None) pandas.core.frame.DataFrame[source]#

Returns a dataframe containing the results from the given path.
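Example usage (a sketch; the filename shown is hypothetical):

from compressai_trainer.utils.compressai.results import compressai_results_dataframe

df = compressai_results_dataframe(
    "image/kodak/compressai-bmshj2018-factorized_mse_cuda.json"
)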

Git#

compressai_trainer.utils.git.branch_name(rev: str = 'HEAD', root: str = '.') str[source]#
compressai_trainer.utils.git.commit_count(rev: str = 'HEAD', root: str = '.') int[source]#
compressai_trainer.utils.git.commit_hash(rev: str = 'HEAD', root: str = '.', short: bool = False) str[source]#
compressai_trainer.utils.git.commit_version(rev: str = '', root: str = '.') str[source]#
compressai_trainer.utils.git.common_ancestor_commit_hash(rev1: str = 'HEAD', rev2: Optional[str] = None, root: str = '.', short: bool = False) str[source]#
compressai_trainer.utils.git.common_ancestor_commit_version(rev1: str = 'HEAD', rev2: str = 'HEAD', root: str = '.') str[source]#
compressai_trainer.utils.git.diff(rev: str = 'HEAD', root: str = '.') str[source]#
compressai_trainer.utils.git.main_branch_name(root: str = '.', candidates: Iterable[str] = ('main', 'master')) str[source]#

Returns the name of the primary branch (main or master).
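These helpers query the git repository at root; for example, to record the code state of the current working tree (a minimal sketch):

from compressai_trainer.utils import git

code_state = {
    "branch": git.branch_name(root="."),
    "commit": git.commit_hash(root=".", short=True),
    "version": git.commit_version(root="."),
    "uncommitted_diff": git.diff(root="."),
}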

Metrics#

compressai_trainer.utils.metrics.compute_metrics(x: torch.Tensor, x_hat: torch.Tensor, metrics: list[str])[source]#
compressai_trainer.utils.metrics.db(x)[source]#

Converts to dB scale.

compressai_trainer.utils.metrics.msssim(a: torch.Tensor, b: torch.Tensor) float[source]#
compressai_trainer.utils.metrics.msssim_db(a: torch.Tensor, b: torch.Tensor) float[source]#
compressai_trainer.utils.metrics.psnr(a: torch.Tensor, b: torch.Tensor) float[source]#
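Example usage (a sketch; the inputs are random tensors in [0, 1], and the metric keys passed to compute_metrics are assumptions):

import torch

from compressai_trainer.utils.metrics import compute_metrics, psnr

x = torch.rand(1, 3, 256, 256)                         # reference batch
x_hat = (x + 0.01 * torch.randn_like(x)).clamp(0, 1)   # hypothetical reconstruction

print(psnr(x, x_hat))
print(compute_metrics(x, x_hat, ["psnr", "ms-ssim"]))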

Pip#

compressai_trainer.utils.pip.freeze()[source]#
compressai_trainer.utils.pip.list(format='columns')[source]#

System#

compressai_trainer.utils.system.hostname() str[source]#
compressai_trainer.utils.system.username() str[source]#
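These are small wrappers that are handy for logging the execution environment alongside an experiment, e.g. (a sketch):

from compressai_trainer.utils import pip, system

env_info = {
    "hostname": system.hostname(),
    "username": system.username(),
    "packages": pip.freeze(),
}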

Utils#

class compressai_trainer.utils.utils.ConfigStringFormatter[source]#
get_field(field_name, args, kwargs)[source]#
compressai_trainer.utils.utils.compute_padding(in_h: int, in_w: int, *, out_h=None, out_w=None, min_div=1)[source]#

Returns tuples for padding and unpadding.

NOTE: This is also available in compressai.ops as of v1.2.4.

Parameters
  • in_h – Input height.

  • in_w – Input width.

  • out_h – Output height.

  • out_w – Output width.

  • min_div – Length that output dimensions should be divisible by.
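The returned pad/unpad tuples are in the format accepted by torch.nn.functional.pad, e.g. to pad an image up to a multiple of a model's downsampling factor and crop the reconstruction back afterwards (a minimal sketch, assuming a divisor of 64):

import torch
import torch.nn.functional as F

from compressai_trainer.utils.utils import compute_padding

h, w = 481, 321                                    # odd-sized input
pad, unpad = compute_padding(h, w, min_div=2**6)   # pad to a multiple of 64

x = torch.rand(1, 3, h, w)
x_padded = F.pad(x, pad, mode="constant", value=0)
# ... run the model on x_padded ...
x_hat = F.pad(x_padded, unpad)                     # crop back to (h, w)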

compressai_trainer.utils.utils.dl_to_ld(dl: dict[K, list[V]]) list[dict[K, V]][source]#

Converts a dict of lists into a list of dicts.
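For example, with made-up metric values:

from compressai_trainer.utils.utils import dl_to_ld

dl = {"bpp": [0.2, 0.4], "psnr": [30.1, 32.5]}
ld = dl_to_ld(dl)
# [{"bpp": 0.2, "psnr": 30.1}, {"bpp": 0.4, "psnr": 32.5}]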

compressai_trainer.utils.utils.flatten_values(x, value_type=<class 'object'>)[source]#
compressai_trainer.utils.utils.format_dataframe(df: pandas.core.frame.DataFrame, x: str, y: str, curves: list[dict[str, Any]], skip_nan: bool = True) pandas.core.frame.DataFrame[source]#

Returns dataframe prepared for plotting multiple metrics.

Parameters
  • df – Dataframe.

  • x – Destination x series.

  • y – Destination y series.

  • curves – Source y series. Useful for plotting multiple curves of the same unit scale (e.g. dB) on the same plot.

  • skip_nan – Skip accumulating NaN values into x, y series.

Examples:

# Basic, single curve.
[{"name": "{experiment}", "x": "bpp", "y": "psnr"}]

# Multiple series with different suffixes.
[
    {"name": "{experiment} (RGB-PSNR)", "x": "bpp", "y": "psnr_rgb"},
    {"name": "{experiment} (YUV-PSNR)", "x": "bpp", "y": "psnr_yuv"},
]

# Flatten multiple bpps/psnrs onto a single curve.
[
    {
        "name": "{experiment}",
        "x": ["bpp_0", "bpp_1", "bpp_2"],
        "y": ["psnr_0", "psnr_1", "psnr_2"],
    }
]
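A usage sketch tying these together (the dataframe contents are made up, and the {experiment} placeholder is assumed to be filled from the column of the same name):

import pandas as pd

from compressai_trainer.utils.utils import format_dataframe

df = pd.DataFrame({
    "experiment": ["example-exp", "example-exp"],
    "bpp": [0.25, 0.50],
    "psnr_rgb": [30.0, 33.0],
    "psnr_yuv": [32.0, 35.0],
})

curves = [
    {"name": "{experiment} (RGB-PSNR)", "x": "bpp", "y": "psnr_rgb"},
    {"name": "{experiment} (YUV-PSNR)", "x": "bpp", "y": "psnr_yuv"},
]

df_plot = format_dataframe(df, x="bpp", y="psnr", curves=curves)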
compressai_trainer.utils.utils.ld_to_dl(ld: list[dict[K, V]]) dict[K, list[V]][source]#

Converts a list of dicts into a dict of lists.
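For example, the inverse of dl_to_ld above:

from compressai_trainer.utils.utils import ld_to_dl

ld = [{"bpp": 0.2, "psnr": 30.1}, {"bpp": 0.4, "psnr": 32.5}]
dl = ld_to_dl(ld)
# {"bpp": [0.2, 0.4], "psnr": [30.1, 32.5]}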

compressai_trainer.utils.utils.np_img_to_tensor(x: numpy.ndarray) torch.Tensor[source]#
compressai_trainer.utils.utils.num_parameters(net: torch.nn.modules.module.Module, predicate=<function <lambda>>) int[source]#
compressai_trainer.utils.utils.tensor_to_np_img(x: torch.Tensor) numpy.ndarray[source]#
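Example usage of the remaining helpers (a sketch; the uint8 HxWxC image layout and the trainable-parameter predicate are assumptions):

import numpy as np
import torch.nn as nn

from compressai_trainer.utils.utils import (
    np_img_to_tensor,
    num_parameters,
    tensor_to_np_img,
)

# Round-trip an image between numpy (assumed HxWxC) and torch (CxHxW) layouts.
img = np.zeros((256, 256, 3), dtype=np.uint8)
x = np_img_to_tensor(img)
img_again = tensor_to_np_img(x)

# Count only the trainable parameters of a module.
net = nn.Conv2d(3, 64, kernel_size=3)
n = num_parameters(net, predicate=lambda p: p.requires_grad)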