graphnet-team/graphnet

configs/models/dynedge_PID_classification_example.yml

arguments:
  backbone:
    ModelConfig:
      arguments:
        add_global_variables_after_pooling: false
        dynedge_layer_sizes: null
        features_subset: null
        global_pooling_schemes: [min, max, mean, sum]
        nb_inputs: 4
        nb_neighbours: 8
        post_processing_layer_sizes: null
        readout_layer_sizes: null
      class_name: DynEdge
  graph_definition:
    ModelConfig:
      arguments:
        columns: [0, 1, 2]
        detector:
          ModelConfig:
            arguments: {}
            class_name: Prometheus
        dtype: null
        nb_nearest_neighbours: 8
        node_definition:
          ModelConfig:
            arguments: {}
            class_name: NodesAsPulses
        input_feature_names: [sensor_pos_x, sensor_pos_y, sensor_pos_z, t]
      class_name: KNNGraph
  optimizer_class: '!class torch.optim.adam Adam'
  optimizer_kwargs: {eps: 0.001, lr: 0.001}
  scheduler_class: '!class graphnet.training.callbacks PiecewiseLinearLR'
  scheduler_config: {interval: step}
  scheduler_kwargs:
    factors: [0.01, 1, 0.01]
    milestones: [0, 20.0, 80]
  tasks:
  - ModelConfig:
      arguments:
        nb_outputs: 3  # number of classes
        prediction_labels: ['noise', 'muon', 'neutrino']
        hidden_size: 128
        loss_function:
          ModelConfig:
            arguments: {options: {1: 0, -1: 0, 13: 1, -13: 1, 12: 2, -12: 2, 14: 2, -14: 2, 16: 2, -16: 2}}
            class_name: CrossEntropyLoss
        loss_weight: null
        target_labels: dummy_pid
        transform_inference: '!lambda x: torch.nn.functional.softmax(x, dim=-1)'
        transform_prediction_and_target: null
        transform_support: null
        transform_target: null
      class_name: MulticlassClassificationTask
class_name: StandardModel
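
For reference, below is a minimal sketch of how a config like this one is typically consumed. It assumes graphnet's documented ModelConfig.load / Model.from_config API, including the trust flag that is needed when a config embeds '!lambda' and '!class' entries; verify the exact signatures against the installed graphnet version.

    # Sketch only: load the YAML above into a ModelConfig and rebuild the model.
    # The API calls and the trust/load_state_dict usage are assumptions based on
    # graphnet's documented workflow, not part of this config file.
    from graphnet.models import Model
    from graphnet.utilities.config import ModelConfig

    # Parse the YAML into a ModelConfig object.
    model_config = ModelConfig.load(
        "configs/models/dynedge_PID_classification_example.yml"
    )

    # Construct the StandardModel with randomly initialised weights.
    # trust=True allows the '!class' and '!lambda' tags (e.g. the softmax
    # transform_inference) to be evaluated back into Python objects.
    model = Model.from_config(model_config, trust=True)

    # Optionally load trained weights afterwards, e.g.:
    # model.load_state_dict("state_dict.pth")

Reconstructing the model this way keeps the architecture (DynEdge backbone, KNNGraph definition, multiclass PID task) fully described by the YAML, so the same file can be shared and re-used without the original training script.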