#!/bin/bash
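# Revision experiments: sweep every (dataset, model, cell) combination at three
# input resolutions. Three passes follow: boosting with downsampling, boosting
# with subsampling, and ridge regression on PCA-reduced features.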
set -e
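# Local paths below; adjust to point at your own checkpoints, data, feature
# cache, and SlowFast checkout.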
ckpt_root=./pretrained;
data_root=/mnt/e/data_derived;
cache_root=/mnt/e/cache;
slowfast_root=../slowfast;
models=(airsim_04 MotionNet)
datasets=(pvc1-repeats pvc4 mt1_norm_neutralbg mt2 mst_norm_neutralbg)
max_cells=(22 24 83 43 35)
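# Parallel to datasets: max_cells[i] is the index of the last cell in
# datasets[i]; the per-cell loop runs subset = 0..max_cell inclusive.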
# Per-dataset subsampling: the largest factor that fits into main memory for
# that dataset (we used 64 GB VMs).
subsamplings=(21 21 12 16 21)
# Resize targets: roughly 0.66X, 1X, and 1.5X of the 112-pixel baseline.
szs=(74 112 168)
# Sweep 1: fit with resizing; boosting with downsampling (aggregator "downsample", size 8).
for dataset_num in {0..4};
do
    dataset=${datasets[$dataset_num]}
    max_cell=${max_cells[$dataset_num]}
    for sz in "${szs[@]}";
    do
        for model in "${models[@]}";
        do
            echo "$dataset" "$model"
            for ((subset = 0; subset <= max_cell; subset++))
            do
                python train_convex.py \
                    --exp_name revision \
                    --dataset "$dataset" \
                    --features "$model" \
                    --subset "$subset" \
                    --batch_size 8 \
                    --cache_root "$cache_root" \
                    --ckpt_root "$ckpt_root" \
                    --data_root "$data_root" \
                    --slowfast_root "$slowfast_root" \
                    --aggregator downsample \
                    --aggregator_sz 8 \
                    --skip_existing \
                    --subsample_layers \
                    --autotune \
                    --no_save \
                    --save_predictions \
                    --method boosting \
                    --resize "$sz"
                # Clear the feature cache between cells; :? aborts if cache_root is unset.
                rm -f "${cache_root:?}"/*
            done
        done
    done
done
# Sweep 2: fit with resizing; boosting with subsampling (aggregator "downsample_t", per-dataset factor).
for dataset_num in {0..4};
do
    dataset=${datasets[$dataset_num]}
    max_cell=${max_cells[$dataset_num]}
    subsampling=${subsamplings[$dataset_num]}
    for sz in "${szs[@]}";
    do
        for model in "${models[@]}";
        do
            echo "$dataset" "$model"
            for ((subset = 0; subset <= max_cell; subset++))
            do
                python train_convex.py \
                    --exp_name revision \
                    --dataset "$dataset" \
                    --features "$model" \
                    --subset "$subset" \
                    --batch_size 8 \
                    --cache_root "$cache_root" \
                    --ckpt_root "$ckpt_root" \
                    --data_root "$data_root" \
                    --slowfast_root "$slowfast_root" \
                    --aggregator downsample_t \
                    --aggregator_sz "$subsampling" \
                    --skip_existing \
                    --subsample_layers \
                    --autotune \
                    --no_save \
                    --save_predictions \
                    --method boosting \
                    --resize "$sz"
                # Clear the feature cache between cells; :? aborts if cache_root is unset.
                rm -f "${cache_root:?}"/*
            done
        done
    done
done
# Sweep 3: fit with resizing; ridge regression with 500 PCA components.
for dataset_num in {0..4};
do
    dataset=${datasets[$dataset_num]}
    max_cell=${max_cells[$dataset_num]}
    for sz in "${szs[@]}";
    do
        for model in "${models[@]}";
        do
            echo "$dataset" "$model"
            for ((subset = 0; subset <= max_cell; subset++))
            do
                python train_convex.py \
                    --exp_name revision_resize \
                    --dataset "$dataset" \
                    --features "$model" \
                    --subset "$subset" \
                    --batch_size 8 \
                    --cache_root "$cache_root" \
                    --ckpt_root "$ckpt_root" \
                    --data_root "$data_root" \
                    --slowfast_root "$slowfast_root" \
                    --aggregator downsample \
                    --aggregator_sz 8 \
                    --skip_existing \
                    --subsample_layers \
                    --autotune \
                    --no_save \
                    --save_predictions \
                    --method ridge \
                    --pca 500 \
                    --resize "$sz"
                # Clear the feature cache between cells; :? aborts if cache_root is unset.
                rm -f "${cache_root:?}"/*
            done
        done
    done
done
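# With --skip_existing, combinations that already have saved results should be
# skipped, so the script can presumably be re-run safely after an interruption.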