IBM/pytorchpipe

configs/vqa_med_2019/c2_classification/c2_class_lstm_resnet50_mfb_cat_is.yml

# Load config defining tasks for training, validation and testing.
default_configs: vqa_med_2019/c2_classification/default_c2_classification.yml

training:
  task:
    batch_size: 48
    # Apply question preprocessing: lowercasing, punctuation removal, tokenization.
    question_preprocessing: lowercase,remove_punctuation,tokenize
    streams:
      # The task returns tokenized questions.
      questions: tokenized_questions

validation:
  task:
    batch_size: 48
    # Apply question preprocessing: lowercasing, punctuation removal, tokenization.
    question_preprocessing: lowercase,remove_punctuation,tokenize
    streams:
      # The task returns tokenized questions.
      questions: tokenized_questions


pipeline:

  global_publisher:
    priority: 0
    type: GlobalVariablePublisher
    # Publish the component output sizes listed below as globals.
    keys: [question_encoder_output_size, image_encoder_output_size, pooling_activation_size]
    values: [100, 2048, 256]
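    # These values are consumed downstream: 100 is the question encoding size,
    # 2048 the ResNet-50 image feature size, 256 the MFB latent/output size.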

  ################# PIPE 0: question #################

  # Model 1: Embeddings
  question_embeddings:
    priority: 1.2
    type: SentenceEmbeddings
    embeddings_size: 100
    pretrained_embeddings_file: glove.6B.100d.txt
    data_folder: ~/data/vqa-med
    word_mappings_file: questions.all.word.mappings.csv
    streams:
      inputs: tokenized_questions
      outputs: embedded_questions
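    # Maps each question token to a 100-d GloVe vector, yielding the embedded
    # sequences consumed by the LSTM below.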

  # Model 2: RNN
  question_lstm:
    priority: 1.3
    type: RecurrentNeuralNetwork
    cell_type: LSTM
    prediction_mode: Last
    use_logsoftmax: False
    initial_state: Trainable
    dropout_rate: 0.1
    hidden_size: 50
    streams:
      inputs: embedded_questions
      predictions: question_activations
    globals:
      input_size: embeddings_size
      prediction_size: question_encoder_output_size
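    # Encodes the embedded question with an LSTM, keeps only the last step
    # (prediction_mode: Last) and projects it to a 100-d question encoding;
    # log-softmax is disabled since this is not the final classifier.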

  ################# PIPE 2: image #################
  # Image encoder.
  image_encoder:
    priority: 3.1
    type: GenericImageEncoder
    model_type: resnet50
    streams:
      inputs: images
      outputs: image_activations
    globals:
      output_size: image_encoder_output_size
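    # Encodes each image into a 2048-d activation vector, the ResNet-50 feature
    # size published as image_encoder_output_size above.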

  ################# PIPE 3: image-question fusion  #################
  # Factorized bilinear pooling (MFB) fusion + FF classifier.
  question_image_fusion:
    priority: 4.1
    type: FactorizedBilinearPooling
    dropout_rate: 0.5
    latent_size: 256
    pool_factor: 2
    streams:
      image_encodings: image_activations
      question_encodings: question_activations
      outputs: pooling_activations
    globals:
      image_encoding_size: image_encoder_output_size
      question_encoding_size: question_encoder_output_size
      output_size: pooling_activation_size # same as latent size
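    # MFB-style fusion: both encodings are projected to latent_size * pool_factor
    # (256 * 2 = 512) dimensions, multiplied element-wise, then sum-pooled over
    # the factor dimension back to 256 (see the sketch after the pipeline).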

  classifier:
    priority: 5.1
    type: FeedForwardNetwork
    hidden_sizes: [100]
    dropout_rate: 0.5
    streams:
      inputs: pooling_activations
    globals:
      input_size: pooling_activation_size
      prediction_size: vocabulary_size_c2
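    # Final classifier: the 256-d fused representation passes through a single
    # 100-unit hidden layer and is projected onto the C2 answer vocabulary.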


  #: pipeline
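
For reference, the fusion stage above follows the multi-modal factorized bilinear pooling (MFB) recipe. Below is a minimal PyTorch sketch of that computation under the sizes set in this config (2048-d image activations, 100-d question activations, latent_size 256, pool_factor 2); it is an illustrative stand-in, not pytorchpipe's FactorizedBilinearPooling implementation.

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class MFBSketch(nn.Module):
        def __init__(self, image_size=2048, question_size=100,
                     latent_size=256, pool_factor=2, dropout_rate=0.5):
            super().__init__()
            # Project both modalities into a shared (latent_size * pool_factor) space.
            self.image_proj = nn.Linear(image_size, latent_size * pool_factor)
            self.question_proj = nn.Linear(question_size, latent_size * pool_factor)
            self.dropout = nn.Dropout(dropout_rate)
            self.latent_size = latent_size
            self.pool_factor = pool_factor

        def forward(self, image_enc, question_enc):
            # Element-wise product of the two projections (the factorized bilinear term).
            joint = self.dropout(self.image_proj(image_enc) * self.question_proj(question_enc))
            # Sum-pool over the factor dimension: (B, latent*k) -> (B, latent).
            joint = joint.view(-1, self.latent_size, self.pool_factor).sum(dim=2)
            # Signed square-root and L2 normalization, as in the MFB paper.
            joint = torch.sign(joint) * torch.sqrt(torch.abs(joint) + 1e-12)
            return F.normalize(joint, dim=1)

    # Shapes match the config: 2048-d image and 100-d question activations
    # fuse into a 256-d joint vector per example.
    fused = MFBSketch()(torch.randn(48, 2048), torch.randn(48, 100))  # -> (48, 256)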