# IBM/pytorchpipe: configs/default/workers/offline_trainer.yml
####################################################################
# Section defining all the default values of parameters used during training when using ptp-offline-trainer.
# If you want to use a different section for "training", pass its name to the trainer as the command line argument '--training_section_name' (DEFAULT: training).
# Note: the following parameters will still be used as default values.
default_training:
  # Set the random seeds: -1 means that they will be picked randomly.
  # Note: their final values will be stored in the final training_configuration.yml saved to log dir.
  seed_numpy: -1
  seed_torch: -1

  # Default batch size.
  batch_size: 64

  # Definition of the task (Mandatory!)
  #task:
  #  One must define its type (Mandatory!)
  #  type: ?
  #  The rest of the content of that section is task-specific...
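
  # A hypothetical, filled-in sketch of the section above; the type name "MNIST"
  # and its option are illustrative assumptions, not guaranteed parts of the task API.
  #task:
  #  type: MNIST
  #  # Task-specific option (assumed): which split to load.
  #  use_train_data: True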
  
  # Section describing curriculum learning (Optional)
  #curriculum_learning: 
  #  # Flag indicating whether curriculum learning has to finish before (eventual) termination of the training.
  #  must_finish: True
  #  The rest of the content of that section is task-specific...

  # Definition of optimizer (Mandatory!)
  #optimizer:
  #  # Type - generally all optimizers from torch.optim are allowed (Mandatory!)
  #  type: Adam
  #  # Options: 
  #  lr: 0.0001
  #  The rest of the content of that section is optimizer-specific...
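
  # A minimal sketch using a different optimizer; the option names follow
  # torch.optim.SGD, and which options apply depends on the chosen type.
  #optimizer:
  #  type: SGD
  #  lr: 0.01
  #  momentum: 0.9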

  # Set a default configuration section for data loader.
  dataloader:
    # Shuffle set by default.
    shuffle: True 
    batch_sampler: None
    # Do not use multiprocessing by default.
    num_workers: 0
    pin_memory: False
    # Do not drop the last incomplete batch by default.
    drop_last: False
    timeout: 0
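
  # An illustrative override of the defaults above, enabling multiprocessing and
  # pinned memory; the keys mirror the arguments of torch.utils.data.DataLoader.
  #dataloader:
  #  shuffle: True
  #  num_workers: 4
  #  pin_memory: True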

  # Definition of sampler (Optional)
  # When this section is not present, the worker will use "standard" sampling (please refer to 'shuffle' in the dataloader section).
  #sampler:
  #  # Type - generally all samplers from PyTorch (plus some new ones) are allowed (Mandatory!)
  #  # Options: 
  #  type: RandomSampler
  #  The rest of the content of that section is sampler-specific...
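
  # A hypothetical sampler section; SubsetRandomSampler is a standard PyTorch
  # sampler, but the 'indices' key shown here is an assumption about how its
  # arguments are passed.
  #sampler:
  #  type: SubsetRandomSampler
  #  indices: [0, 1, 2, 3]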

  # Terminal conditions that will be used during training.
  # They can (and often should) be overwritten.
  terminal_conditions:
    # Terminal condition I: loss threshold; going below it will terminate the training.
    loss_stop_threshold: 0.00001 # 1e-5
    # Terminal condition II: early stopping monitors the validation loss; if it has not gone down during the last n validations, training will be terminated (Optional, a negative value means that this condition is disabled).
    early_stop_validations: 10
    # Terminal condition III: maximal number of epochs (Mandatory for this trainer! Must be > 0)
    epoch_limit: 10
    # Terminal condition IV: maximal number of episodes (Optional, -1 (negative) means that this condition is disabled)
    episode_limit: -1
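
  # An illustrative override: in a user configuration the same keys would appear
  # under the section passed via '--training_section_name' (DEFAULT: training),
  # e.g. to raise the epoch limit and tighten early stopping.
  #terminal_conditions:
  #  epoch_limit: 100
  #  early_stop_validations: 5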



####################################################################
# Section defining all the default values of parameters used during validation.
# If you want to use a different section for validation, pass its name to the trainer as the command line argument '--validation_section_name' (DEFAULT: validation).
# Note: the following parameters will still be used as default values.
default_validation:
  # Defines how often the partial validation will be performed.
  # In this trainer partial validation is optional (a negative value means it is disabled).
  partial_validation_interval: -1

  # Definition of the task (Mandatory!)
  #task:
  #  One must define its type (Mandatory!)
  #  type: ?
  #  The rest of the content of that section is task-specific...

  # Set a default configuration section for data loader.
  dataloader:
    # Shuffle set by default.
    shuffle: True 
    # Do not use multiprocessing by default.
    num_workers: 0
    pin_memory: False
    # Do not drop the last incomplete batch by default.
    drop_last: False
    timeout: 0
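
  # An illustrative override: for validation one would typically disable shuffling;
  # the keys again mirror the arguments of torch.utils.data.DataLoader.
  #dataloader:
  #  shuffle: False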

  # Definition of sampler (Optional)
  # When this section is not present, the worker will use "standard" sampling (please refer to 'shuffle' in the dataloader section).
  #sampler:
  #  # Type - generally all samplers from PyTorch (plus some new ones) are allowed (Mandatory!)
  #  # Options: 
  #  type: RandomSampler
  #  The rest of the content of that section is sampler-specific...



####################################################################
# Section defining the pipeline, i.e. all the components used by the trainer.
# If you want to use a different section for the pipeline, pass its name to the trainer as the command line argument '--pipeline_section_name' (DEFAULT: pipeline).
pipeline: 
  # Pipeline must contain at least one component.
  #name_1:
  #   Each component must have its priority defined... (Mandatory!)
  #   priority: 0.1 # Can be a float. Smaller means higher priority, down to zero.
  #   # ... and type (Mandatory!)
  #   type: ?
  #   The rest of the content of that section is component-specific...
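
  # A hypothetical two-component pipeline illustrating the structure above; the
  # component type names (FeedForwardNetwork, NLLLoss) and the absence of further
  # options are assumptions; consult the components documentation for actual names
  # and their parameters.
  #model:
  #   priority: 1.0
  #   type: FeedForwardNetwork
  #loss:
  #   priority: 2.0
  #   type: NLLLoss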