-
Notifications
You must be signed in to change notification settings - Fork 17
/
configs.py
150 lines (141 loc) · 7.96 KB
/
configs.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
import argparse
def arg_parse(argv=None):
    """Build the CLI parser for GNN training/explanation and return parsed args.

    Options are organised in four groups: utils (device/seed/save), model
    training hyper-parameters, noise-evaluation settings, and GraphSVX
    explanation parameters. Every option receives a default through
    ``parser.set_defaults``, so calling with no flags works out of the box.

    Args:
        argv: optional list of argument strings to parse. Defaults to
            ``None``, in which case argparse reads ``sys.argv[1:]`` as
            before — passing a list keeps the function testable without
            touching ``sys.argv``.

    Returns:
        argparse.Namespace with all options described below.
    """
    def str2bool(v):
        # BUGFIX: the original used ``type=bool``, but ``bool("False")`` is
        # True (any non-empty string is truthy), so --multiclass False was
        # silently parsed as True. Parse the usual spellings explicitly.
        if isinstance(v, bool):
            return v
        if v.lower() in ('true', 't', 'yes', 'y', '1'):
            return True
        if v.lower() in ('false', 'f', 'no', 'n', '0'):
            return False
        raise argparse.ArgumentTypeError(f'Boolean value expected, got {v!r}')

    parser = argparse.ArgumentParser()

    # Utils params
    utils_parser = parser.add_argument_group('utils')
    utils_parser.add_argument('--cuda', help='CUDA.')
    utils_parser.add_argument('--gpu', default=False, help='whether to use GPU.')
    utils_parser.add_argument("--seed", type=int)
    utils_parser.add_argument("--save", type=str,
                              help="True to save the trained model obtained")

    # Model training params
    training_parser = parser.add_argument_group('training')
    training_parser.add_argument('--max_nodes', dest='max_nodes', type=int,
                                 help='Maximum number of nodes (ignore graphs with nodes exceeding the number).')
    training_parser.add_argument('--method', dest='method',
                                 help='Method. Possible values: base, ')
    training_parser.add_argument('--batch_size', dest='batch_size', type=int,
                                 help='Batch size.')
    training_parser.add_argument('--epochs', dest='num_epochs', type=int,
                                 help='Number of epochs to train.')
    training_parser.add_argument('--train_ratio', dest='train_ratio', type=float,
                                 help='Ratio of number of graphs training set to all graphs.')
    training_parser.add_argument('--input_dim', dest='input_dim', type=int,
                                 help='Input feature dimension')
    training_parser.add_argument('--hidden_dim', dest='hidden_dim', type=int,
                                 help='Hidden dimension')
    training_parser.add_argument('--output_dim', dest='output_dim', type=int,
                                 help='Output dimension')
    training_parser.add_argument('--num_gc_layers', dest='num_gc_layers', type=int,
                                 help='Number of graph convolution layers before each pooling')
    training_parser.add_argument('--bn', dest='bn', action='store_const',
                                 const=True, default=False,
                                 help='Whether batch normalization is used')
    training_parser.add_argument('--dropout', dest='dropout', type=float,
                                 help='Dropout rate.')
    training_parser.add_argument('--nobias', dest='bias', action='store_const',
                                 const=False, default=True,
                                 help='Whether to add bias. Default to True.')
    training_parser.add_argument('--weight_decay', dest='weight_decay', type=float,
                                 help='Weight decay regularization constant.')
    training_parser.add_argument('--clip', dest='clip', type=float,
                                 help='Gradient clip value')

    # Evaluation (noise) params
    eval_noise_parser = parser.add_argument_group('eval noise')
    # BUGFIX: ``type=list`` makes argparse split the argument string into
    # characters; ``nargs='+'`` collects whitespace-separated values instead.
    eval_noise_parser.add_argument("--feat_explainers", nargs='+', type=str,
                                   default=['GraphSVX', 'GNNExplainer', 'GraphLIME',
                                            'LIME', 'SHAP', 'Greedy'],
                                   help="Name of the benchmarked explainers among \
                                       GraphSVX, SHAP, LIME, GraphLIME, Greedy and GNNExplainer")
    eval_noise_parser.add_argument("--node_explainers", nargs='+', type=str,
                                   default=['GraphSVX', 'Greedy', 'GNNExplainer'],
                                   help="Name of the benchmarked explainers among Greedy, GNNExplainer and GraphSVX")
    eval_noise_parser.add_argument("--test_samples", type=int,
                                   help='number of test samples for evaluation')
    eval_noise_parser.add_argument("--K", type=float,
                                   help='proportion of most important features considered, among non zero ones')
    eval_noise_parser.add_argument("--prop_noise_feat", type=float,
                                   help='proportion of noisy features')
    eval_noise_parser.add_argument("--prop_noise_nodes", type=float,
                                   help='proportion of noisy nodes')
    eval_noise_parser.add_argument("--connectedness", type=str,
                                   help='how connected are the noisy nodes we define: low, high or medium')
    eval_noise_parser.add_argument("--evalshap", type=str2bool,
                                   help='True if want to compare GraphSVX with SHAP for features explanations')

    # Explanations params
    parser.add_argument("--model", type=str,
                        help="Name of the GNN: GCN or GAT")
    parser.add_argument("--dataset", type=str,
                        help="Name of the dataset among Cora, PubMed, syn1-6, Mutagenicity")
    parser.add_argument("--indexes", nargs='+', type=int, default=[0],
                        help="indexes of the nodes/graphs whose prediction are explained")
    parser.add_argument("--hops", type=int,
                        help="number k for k-hops neighbours considered in an explanation")
    parser.add_argument("--num_samples", type=int,
                        help="number of coalitions sampled and used to approx shapley values")
    parser.add_argument("--hv", type=str,
                        help="method used to convert the simplified input to the original input space")
    parser.add_argument("--feat", type=str,
                        help="method used to determine the features considered")
    parser.add_argument("--coal", type=str,
                        help="type of coalition sampler")
    parser.add_argument("--g", type=str,
                        help="surrogate model used to train g on derived dataset")
    parser.add_argument("--multiclass", type=str2bool,
                        help='False if we consider explanations for the predicted class only')
    parser.add_argument("--regu", type=float,
                        help='None if we do not apply regularisation, \
                            1 if we focus only on features in explanations, 0 for nodes')
    parser.add_argument("--info", type=str2bool,
                        help='True if want to print info')
    parser.add_argument("--fullempty", type=str,
                        help='True if want to discard full and empty coalitions')
    parser.add_argument("--S", type=int,
                        help='Max size of coalitions sampled in priority and treated specifically')

    # Allowed values for the string-typed options above:
    # args_hv: 'compute_pred', 'compute_pred_subgraph', 'graph_classification'
    # args_feat: 'All', 'Expectation', 'Null'
    # args_coal: 'NewSmarterSeparate', 'SmarterSeparate', 'Smarter', 'Smart', 'Random', 'All'
    # args_g: 'WLS', 'WLR', 'WLR_sklearn', 'WLR_Lasso'
    parser.set_defaults(dataset='syn1',
                        model='GCN',
                        indexes=[500, 600],
                        num_samples=400,
                        fullempty=None,
                        S=1,
                        hops=3,
                        hv='compute_pred',
                        feat='Expectation',
                        coal='SmarterSeparate',
                        g='WLR_sklearn',
                        multiclass=False,
                        regu=None,
                        info=False,
                        seed=10,
                        gpu=False,
                        cuda='0',
                        save=False,
                        feat_explainers=['GraphSVX', 'GNNExplainer',
                                         'GraphLIME', 'LIME', 'SHAP'],
                        node_explainers=['GraphSVX', 'GNNExplainer', 'Greedy'],
                        test_samples=50,
                        K=0.20,
                        prop_noise_feat=0.20,
                        prop_noise_nodes=0.20,
                        connectedness='medium',
                        opt='adam',
                        max_nodes=100,
                        feature_type='default',
                        lr=0.001,
                        clip=2.0,
                        batch_size=20,
                        num_epochs=1000,
                        train_ratio=0.8,
                        test_ratio=0.1,
                        input_dim=10,
                        hidden_dim=20,
                        output_dim=20,
                        num_gc_layers=3,
                        dropout=0.0,
                        weight_decay=0.005,
                        method='base'
                        )
    return parser.parse_args(argv)