# IBM/pytorchpipe
# configs/vqa_med_2019/c2_classification/c2_class_lstm_vgg16_rn.yml

# Load config defining tasks for training, validation and testing.
default_configs: vqa_med_2019/c2_classification/default_c2_classification.yml
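# Settings in the sections below extend or override entries loaded from that
# default configuration.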

training:
  task:
    # Apply image preprocessing/data augmentations.
    image_preprocessing: normalize
    # none | random_affine | random_horizontal_flip | normalize | all
    question_preprocessing: lowercase,remove_punctuation,tokenize
    # none | lowercase | remove_punctuation | tokenize | random_remove_stop_words | random_shuffle_words | all
    streams:
      # The task returns tokenized questions.
      questions: tokenized_questions

validation:
  task:
    question_preprocessing: lowercase,remove_punctuation,tokenize
    # none | lowercase | remove_punctuation | tokenize | random_remove_stop_words | random_shuffle_words | all
    streams:
      # The task returns tokenized questions.
      questions: tokenized_questions


pipeline:
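  # Components run in order of increasing 'priority'. Each component's
  # 'streams' section remaps its default stream names onto pipeline-wide
  # streams; its 'globals' section binds selected parameters to shared
  # global variables.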

  global_publisher:
    priority: 0
    type: GlobalVariablePublisher
    # Add question_encoder_output_size to globals.
    keys: [question_encoder_output_size]
    values: [100]
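    # The published value (100) is consumed below by question_lstm (as
    # prediction_size) and question_image_fusion (as question_encoding_size).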

  ################# PIPE 0: question #################

  # Model 1: Embeddings
  question_embeddings:
    priority: 1.2
    type: SentenceEmbeddings
    embeddings_size: 100
    pretrained_embeddings_file: glove.6B.100d.txt
    data_folder: ~/data/vqa-med
    word_mappings_file: questions.all.word.mappings.csv
    streams:
      inputs: tokenized_questions
      outputs: embedded_questions
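    # Maps each token to a 100-dimensional vector initialized from GloVe,
    # using the word-to-index mappings file; the output stream carries one
    # embedding per token, i.e. roughly [batch_size, seq_length, 100].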
  
  # Model 2: RNN
  question_lstm:
    priority: 1.3
    type: RecurrentNeuralNetwork
    cell_type: LSTM
    prediction_mode: Last
    use_logsoftmax: False
    initial_state: Zero
    #dropout_rate: 0.5
    hidden_size: 50
    streams:
      inputs: embedded_questions
      predictions: question_activations
    globals:
      input_size: embeddings_size
      prediction_size: question_encoder_output_size
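    # The LSTM (hidden size 50) reads the embedded question; 'Last' prediction
    # mode keeps only the final time step, projected to prediction_size (100,
    # bound to question_encoder_output_size). With use_logsoftmax False, raw
    # activations flow on to the fusion stage.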

  ################# PIPE 1: image #################
  # Image encoder.
  image_encoder:
    priority: 3.1
    type: GenericImageEncoder
    return_feature_maps: True
    streams:
      inputs: images
      outputs: feature_maps
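  # With return_feature_maps set, the encoder outputs convolutional feature
  # maps rather than a flattened vector, giving the Relational Network below
  # spatial "objects" to pair up. Per the file name the backbone is VGG-16;
  # the concrete model choice is assumed to come from the component defaults
  # or the inherited configuration.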

  ################# PIPE 2: fusion + classification #################
  # Relational Network fusing question activations with image feature maps.
  question_image_fusion:
    priority: 4.1
    type: RelationalNetwork
    dropout_rate: 0.5
    g_theta_sizes: [100, 100]
    streams:
      question_encodings: question_activations
      outputs: fused_image_question_activations
    globals:
      question_encoding_size: question_encoder_output_size
      output_size: fused_image_question_activation_size
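    # Relational Network in the style of Santoro et al. (2017): each pair of
    # spatial positions from the feature maps is concatenated with the
    # question encoding and passed through the g_theta MLP ([100, 100]);
    # per-pair outputs are aggregated into one fused activation vector.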

  classifier:
    priority: 4.2
    type: FeedForwardNetwork
    hidden_sizes: [100, 100]
    dropout_rate: 0.5
    streams:
      inputs: fused_image_question_activations
    globals:
      input_size: fused_image_question_activation_size
      prediction_size: vocabulary_size_c2
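    # Two hidden layers of 100 units; prediction_size is bound to
    # vocabulary_size_c2, the size of the C2 answer vocabulary (assumed to be
    # published by the task or the default configuration).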

  #: pipeline
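
# A typical launch, assuming pytorchpipe's ptp-online-trainer entry point and
# its --c config flag (both assumptions, not confirmed by this file):
#
#   ptp-online-trainer --c configs/vqa_med_2019/c2_classification/c2_class_lstm_vgg16_rn.yml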