"""Module for configuration normalization.

The `[main]` configuration section contains arguments that can be filled with
different types of values, e.g. `trainer` can be either a single trainer
object or a list of them. This module provides functions for unifying the
configuration interface.
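
For example, in training mode a single `trainer` object is wrapped into the
list `cfg.trainers`, and two-element `evaluation` entries are expanded into
three-element ones.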
| 7 | +""" |
| 8 | + |
| 9 | +from argparse import Namespace |
| 10 | +from datetime import timedelta |
| 11 | +import re |
| 12 | +import time |
| 13 | +from typing import List, Union, Callable |
| 14 | + |
| 15 | +import numpy as np |
| 16 | + |
| 17 | +from neuralmonkey.dataset import BatchingScheme |
| 18 | +from neuralmonkey.logging import warn |
| 19 | +from neuralmonkey.tf_manager import get_default_tf_manager |
| 20 | +from neuralmonkey.trainers.delayed_update_trainer import DelayedUpdateTrainer |
| 21 | + |
| 22 | + |
def normalize_configuration(cfg: Namespace, train_mode: bool) -> None:
    """Given a configuration namespace, normalize the values it contains.

    Arguments:
        cfg: The namespace object returned by `Configuration.make_namespace`
        train_mode: Boolean flag controlling normalization of parameters only
            used during training.
    """
    if train_mode:
        _normalize_train_cfg(cfg)

    if cfg.tf_manager is None:
        cfg.tf_manager = get_default_tf_manager()

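    # Exactly one of batch_size and batching_scheme may be specified; the
    # missing one is then derived from the other so that both are always set.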
    if (cfg.batch_size is None) == (cfg.batching_scheme is None):
        raise ValueError("You must specify either batch_size or "
                         "batching_scheme (not both).")

    if cfg.batch_size is not None:
        assert cfg.batching_scheme is None
        cfg.batching_scheme = BatchingScheme(batch_size=cfg.batch_size)
    else:
        assert cfg.batching_scheme is not None
        cfg.batch_size = cfg.batching_scheme.batch_size

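    # If no separate batch size is given for the runners, reuse the one from
    # the training batching scheme.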
    if cfg.runners_batch_size is None:
        cfg.runners_batch_size = cfg.batching_scheme.batch_size

    cfg.runners_batching_scheme = BatchingScheme(
        batch_size=cfg.runners_batch_size,
        token_level_batching=cfg.batching_scheme.token_level_batching,
        use_leftover_buckets=True)

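    # Evaluation entries given as 2-tuples (series id, evaluator) are expanded
    # into 3-tuples by duplicating the series id, so that all entries have the
    # same length downstream.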
    cfg.evaluation = [(e[0], e[0], e[1]) if len(e) == 2 else e
                      for e in cfg.evaluation]

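    # The main metric is built from the last evaluation entry; without any
    # evaluators, the first loss of the last runner is used instead, in which
    # case the TensorFlow manager must be set to minimize the metric.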
    if cfg.evaluation:
        cfg.main_metric = "{}/{}".format(cfg.evaluation[-1][0],
                                         cfg.evaluation[-1][-1].name)
    else:
        cfg.main_metric = "{}/{}".format(cfg.runners[-1].decoder_data_id,
                                         cfg.runners[-1].loss_names[0])

        if not cfg.tf_manager.minimize_metric:
            raise ValueError("minimize_metric must be set to True in "
                             "TensorFlowManager when using loss as "
                             "the main metric")


def _normalize_train_cfg(cfg: Namespace) -> None:
    """Given a configuration namespace, normalize the values it contains.

    This function is only executed when training mode has been invoked.

    Arguments:
        cfg: The namespace object returned by `Configuration.make_namespace`
    """
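    # Wrap single values into singleton lists so that the rest of the code can
    # iterate over `val_datasets` and `trainers` uniformly.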
    if not isinstance(cfg.val_dataset, List):
        cfg.val_datasets = [cfg.val_dataset]
    else:
        cfg.val_datasets = cfg.val_dataset

    if not isinstance(cfg.trainer, List):
        cfg.trainers = [cfg.trainer]
    else:
        cfg.trainers = cfg.trainer

    # Deal with delayed update trainers and logging periods. With multiple
    # trainers, the cleanest solution is probably to take the lowest common
    # multiple of their batches_per_update values, but since this is a very
    # unusual setup, we also warn about it.

    delayed_trainers = [t for t in cfg.trainers
                        if isinstance(t, DelayedUpdateTrainer)]

    denominator = 1
    if len(cfg.trainers) > 1 and delayed_trainers:
        warn("Weird setup: using multiple trainers, one of which is a "
             "delayed update trainer. No one can vouch for your safety, "
             "user!")
        warn("Using the lowest common multiple of all delayed trainers' "
             "batches_per_update parameters for the logging period")
        warn("Note that if you are using a multi-task trainer, you do so "
             "at your own risk")

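        # E.g. with batches_per_update values of 2 and 3, the denominator
        # becomes 6, so logging can only happen on steps where every delayed
        # trainer has just applied an update.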
        denominator = np.lcm.reduce([t.batches_per_update
                                     for t in delayed_trainers])
    elif delayed_trainers:
        assert len(cfg.trainers) == 1
        denominator = cfg.trainers[0].batches_per_update

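    # Both periods are converted to functions of the training step and the
    # time of the last log/validation that decide whether it is time to fire.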
    cfg.log_timer = _resolve_period(cfg.logging_period, denominator)
    cfg.val_timer = _resolve_period(cfg.validation_period, denominator)


def _resolve_period(period: Union[str, int],
                    denominator: int) -> Callable[[int, float], bool]:
    """Convert the logging period into a function for logging time checks.

    Logging and validation periods can both be provided either as a number of
    batches after which to log/validate, or as a time interval between the
    logs/validation runs.

    This function unifies both representations into a function that decides
    whether to log/validate based on a given training step and the time since
    the last log/validation.

    Arguments:
        period: Either a string representing time, or a number representing
            the number of batches.
        denominator: Only allow logging when the given step (the number of
            batches since the start of the training) is divisible by this
            value. This is used e.g. when a `DelayedUpdateTrainer` is used.

    Returns:
        A function of the current training step and the time since the last
        log that returns a boolean value.
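
    Example (illustrative only):
        `_resolve_period(1000, 1)` returns a function that fires on every
        1000th step, while `_resolve_period("30m", 1)` returns a function
        that fires whenever more than 30 minutes of process time (as measured
        by `time.process_time`) have elapsed since the last log.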
    """
    def get_batch_logger(period: int) -> Callable[[int, float], bool]:
        def is_time(step: int, _: float) -> bool:
            return step != 0 and step % period == 0
        return is_time

    def get_time_logger(period: float) -> Callable[[int, float], bool]:
        def is_time(step: int, last_time: float) -> bool:
            if step % denominator != 0:
                return False
            return last_time + period < time.process_time()
        return is_time

    if isinstance(period, int):
        if period % denominator != 0:
            raise ValueError(
                "When using a delayed update trainer, the logging/validation "
                "periods must be divisible by batches_per_update.")

        return get_batch_logger(period)

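    # Time-based periods are strings made of <number><unit> chunks with units
    # d, h, m and s, e.g. "1d12h", "45m" or "30s".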
    regex = re.compile(
        r"((?P<days>\d+?)d)?((?P<hours>\d+?)h)?((?P<minutes>\d+?)m)?"
        r"((?P<seconds>\d+?)s)?")
    parts = regex.fullmatch(period)

    if not parts:
        raise ValueError(
            "Validation or logging period has an incorrect format. "
            "It should be in a format like: 3h; 5m; 14s")

    time_params = {}
    for (name, param) in parts.groupdict().items():
        if param:
            time_params[name] = int(param)

    delta_seconds = timedelta(**time_params).total_seconds()
    if delta_seconds <= 0:
        raise ValueError("Validation or logging period must be bigger than 0")

    return get_time_logger(delta_seconds)