IBM/pytorchpipe
configs/vqa_med_2019/c2_classification/c2_classification_all_rnn_vgg16_mcb.yml

# Load config defining tasks for training, validation and testing.
default_configs: vqa_med_2019/c2_classification/default_c2_classification.yml

pipeline:

  global_publisher:
    priority: 0
    type: GlobalVariablePublisher
    # Publish the question encoder, image encoder and fusion output sizes to globals.
    keys: [question_encoder_output_size, image_encoder_output_size, fused_image_question_activation_size]
    values: [200, 500, 100]
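  # How these are consumed: each key above is published as a global variable
  # holding the matching value, and downstream components bind to it by name
  # in their own "globals" maps. For example, question_lstm below resolves
  #
  #   globals:
  #     prediction_size: question_encoder_output_size   # -> 200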

  ################# PIPE 1: question #################
  # Question encoding.
  question_tokenizer:
    priority: 1.1
    type: SentenceTokenizer
    streams:
      inputs: questions
      outputs: tokenized_questions
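  # Illustrative only; the exact splitting rules are up to SentenceTokenizer
  # and are not pinned down by this config:
  #
  #   "what does the ct scan show?" -> ["what", "does", "the", "ct", "scan", "show", "?"]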

  # Model 1: Embeddings
  question_embeddings:
    priority: 1.2
    type: SentenceEmbeddings
    embeddings_size: 50
    pretrained_embeddings_file: glove.6B.50d.txt
    data_folder: ~/data/vqa-med
    word_mappings_file: questions.all.word.mappings.csv
    streams:
      inputs: tokenized_questions
      outputs: embedded_questions
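  # A minimal PyTorch sketch of the resulting shapes, assuming the component
  # acts as a plain lookup into the 50-d GloVe table (vocab_size, batch_size
  # and seq_len are hypothetical):
  #
  #   import torch
  #   vocab_size, batch_size, seq_len = 1000, 8, 12
  #   embedding = torch.nn.Embedding(vocab_size, 50)    # embeddings_size: 50
  #   token_ids = torch.randint(0, vocab_size, (batch_size, seq_len))
  #   embedded_questions = embedding(token_ids)         # [batch, seq_len, 50]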
  
  # Model 2: RNN
  question_lstm:
    priority: 1.3
    type: RecurrentNeuralNetwork
    cell_type: LSTM
    prediction_mode: Last
    use_logsoftmax: False
    initial_state: Zero
    hidden_size: 50
    streams:
      inputs: embedded_questions
      predictions: question_activations
    globals:
      input_size: embeddings_size
      prediction_size: question_encoder_output_size
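  # A rough PyTorch equivalent, assuming prediction_mode "Last" keeps only the
  # final time step and projects it to prediction_size, and use_logsoftmax:
  # False leaves the raw activations:
  #
  #   import torch
  #   embedded_questions = torch.randn(8, 12, 50)       # [batch, seq_len, 50]
  #   lstm = torch.nn.LSTM(input_size=50, hidden_size=50, batch_first=True)
  #   head = torch.nn.Linear(50, 200)                   # question_encoder_output_size
  #   out, _ = lstm(embedded_questions)
  #   question_activations = head(out[:, -1, :])        # [batch, 200]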

  ################# PIPE 3: image #################
  # Image encoder.
  image_encoder:
    priority: 3.1
    type: GenericImageEncoder
    streams:
      inputs: images
      outputs: image_activations
    globals:
      output_size: image_encoder_output_size
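  # The file name indicates a VGG-16 backbone. A hedged torchvision sketch of
  # mapping images to image_encoder_output_size; the actual GenericImageEncoder
  # wiring may differ:
  #
  #   import torch, torchvision
  #   backbone = torchvision.models.vgg16(weights=None)
  #   backbone.classifier[-1] = torch.nn.Linear(4096, 500)        # image_encoder_output_size
  #   image_activations = backbone(torch.randn(8, 3, 224, 224))   # [8, 500]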

  ################# PIPE 4: fusion + classification #################
  # Compact Bilinear Pooling of the question and image encodings, followed by a feed-forward classifier.
  question_image_fusion:
    priority: 4.1
    type: CompactBilinearPooling
    streams:
      image_encodings: image_activations
      question_encodings: question_activations
      outputs: fused_image_question_activations
    globals:
      image_encoding_size: image_encoder_output_size
      question_encoding_size: question_encoder_output_size
      output_size: fused_image_question_activation_size
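  # Compact bilinear pooling approximates the outer product of the two
  # encodings via the count-sketch + FFT trick (Gao et al. 2016; applied to
  # VQA by Fukui et al. 2016). A self-contained sketch of that trick, not
  # pytorchpipe's actual implementation:
  #
  #   import torch
  #
  #   def count_sketch(x, h, s, d):
  #       # Scatter-add the sign-flipped features into d buckets chosen by h.
  #       sketch = torch.zeros(x.size(0), d)
  #       sketch.index_add_(1, h, x * s)
  #       return sketch
  #
  #   d = 100                                  # fused_image_question_activation_size
  #   x_img, x_q = torch.randn(8, 500), torch.randn(8, 200)
  #   h_i = torch.randint(0, d, (500,)); s_i = torch.randint(0, 2, (500,)).float() * 2 - 1
  #   h_q = torch.randint(0, d, (200,)); s_q = torch.randint(0, 2, (200,)).float() * 2 - 1
  #   fused = torch.fft.irfft(
  #       torch.fft.rfft(count_sketch(x_img, h_i, s_i, d)) *
  #       torch.fft.rfft(count_sketch(x_q, h_q, s_q, d)), n=d)    # [8, 100]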

  classifier:
    priority: 4.2
    type: FeedForwardNetwork
    hidden_sizes: [100]
    dropout_rate: 0.5
    streams:
      inputs: fused_image_question_activations
    globals:
      input_size: fused_image_question_activation_size
      prediction_size: vocabulary_size_c2
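  # A rough PyTorch equivalent of this head, assuming a ReLU between layers
  # and a log-softmax output; num_classes is a hypothetical stand-in for the
  # vocabulary_size_c2 global, presumably published by the inherited default
  # C2 configuration:
  #
  #   import torch
  #   num_classes = 42                         # placeholder for vocabulary_size_c2
  #   classifier = torch.nn.Sequential(
  #       torch.nn.Linear(100, 100),           # input_size -> hidden_sizes[0]
  #       torch.nn.ReLU(),
  #       torch.nn.Dropout(p=0.5),             # dropout_rate
  #       torch.nn.Linear(100, num_classes),
  #       torch.nn.LogSoftmax(dim=1),
  #   )
  #   predictions = classifier(torch.randn(8, 100))    # [8, num_classes]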

  #: pipeline