Skip to content

Commit 4f4ddc5

Browse files
authored
Scripts and model checkpoints for the KAT (known-answer test) of object detection on the SVHN dataset (#246)
1 parent 1875ec5 commit 4f4ddc5

8 files changed

+2456
-0
lines changed

gen-demos-max78000.sh

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,3 +15,4 @@ python ai8xize.py --test-dir $TARGET --prefix faceid --checkpoint-file trained/a
1515
python ai8xize.py --test-dir $TARGET --prefix cats-dogs --checkpoint-file trained/ai85-catsdogs-qat8-q.pth.tar --config-file networks/cats-dogs-hwc.yaml --fifo --softmax $COMMON_ARGS "$@"
1616
python ai8xize.py --test-dir $TARGET --prefix camvid_unet --checkpoint-file trained/ai85-camvid-unet-large-fakept-q.pth.tar --config-file networks/camvid-unet-large-fakept.yaml $COMMON_ARGS --overlap-data --mlator --no-unload --max-checklines 8192 --new-kernel-loader "$@"
1717
python ai8xize.py --test-dir $TARGET --prefix aisegment_unet --checkpoint-file trained/ai85-aisegment-unet-large-fakept-q.pth.tar --config-file networks/aisegment-unet-large-fakept.yaml $COMMON_ARGS --overlap-data --mlator --no-unload --max-checklines 8192 --new-kernel-loader "$@"
18+
python ai8xize.py --test-dir $TARGET --prefix svhn_tinierssd --checkpoint-file trained/ai85-svhn-tinierssd-qat8-q.pth.tar --config-file networks/svhn-tinierssd.yaml --overlap-data $COMMON_ARGS "$@"

gen-demos-max78002.sh

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,7 @@ python ai8xize.py --test-dir $TARGET --prefix faceid --checkpoint-file trained/a
1616
python ai8xize.py --test-dir $TARGET --prefix cats-dogs --checkpoint-file trained/ai85-catsdogs-qat8-q.pth.tar --config-file networks/cats-dogs-hwc-no-fifo.yaml --softmax $COMMON_ARGS "$@"
1717
python ai8xize.py --test-dir $TARGET --prefix camvid_unet --checkpoint-file trained/ai85-camvid-unet-large-fakept-q.pth.tar --config-file networks/camvid-unet-large-fakept.yaml $COMMON_ARGS --overlap-data --mlator --no-unload --max-checklines 8192 "$@"
1818
python ai8xize.py --test-dir $TARGET --prefix aisegment_unet --checkpoint-file trained/ai85-aisegment-unet-large-fakept-q.pth.tar --config-file networks/aisegment-unet-large-fakept.yaml $COMMON_ARGS --overlap-data --mlator --no-unload --max-checklines 8192 "$@"
19+
python ai8xize.py --test-dir $TARGET --prefix svhn_tinierssd --checkpoint-file trained/ai85-svhn-tinierssd-qat8-q.pth.tar --config-file networks/svhn-tinierssd.yaml --overlap-data $COMMON_ARGS "$@"
1920
python ai8xize.py --test-dir $TARGET --prefix cifar-100-effnet2 --checkpoint-file trained/ai87-cifar100-effnet2-qat8-q.pth.tar --config-file networks/ai87-cifar100-effnet2.yaml --softmax $COMMON_ARGS "$@"
2021
python ai8xize.py --test-dir $TARGET --prefix cifar-100-mobilenet-v2-0.75 --checkpoint-file trained/ai87-cifar100-mobilenet-v2-0.75-qat8-q.pth.tar --config-file networks/ai87-cifar100-mobilenet-v2-0.75.yaml --softmax $COMMON_ARGS "$@"
2122
python ai8xize.py --test-dir $TARGET --prefix imagenet --checkpoint-file trained/ai87-imagenet-effnet2-q.pth.tar --config-file networks/ai87-imagenet-effnet2.yaml $COMMON_ARGS "$@"

networks/svhn-tinierssd.yaml

Lines changed: 231 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,231 @@
1+
---
2+
# HWC (little data) configuration for SVHN
3+
# Parallel Model
4+
5+
arch: ai85tinierssd
6+
dataset: svhn_74
7+
8+
layers:
9+
10+
# Layer 0: backbone_conv1
11+
- out_offset: 0x2000
12+
in_offset: 0x2000
13+
processors: 0x0000000000000007
14+
output_processors: 0xffffffff00000000
15+
operation: conv2d
16+
kernel_size: 3x3
17+
pad: 1
18+
activate: ReLU
19+
20+
# Layer 1: backbone_conv2
21+
- out_offset: 0x2000
22+
in_offset: 0x2000
23+
processors: 0xffffffff00000000
24+
output_processors: 0x00000000ffffffff
25+
operation: conv2d
26+
kernel_size: 3x3
27+
pad: 1
28+
activate: ReLU
29+
30+
# Layer 2: backbone_conv3
31+
- out_offset: 0x1000
32+
in_offset: 0x2000
33+
processors: 0x00000000ffffffff
34+
output_processors: 0xffffffffffffffff
35+
operation: conv2d
36+
kernel_size: 3x3
37+
pad: 1
38+
activate: ReLU
39+
max_pool: 2
40+
pool_stride: 2
41+
42+
# Layer 3: backbone_conv4
43+
- out_offset: 0x0000 #1600
44+
in_offset: 0x1000
45+
processors: 0xffffffffffffffff
46+
output_processors: 0xffffffffffffffff
47+
operation: conv2d
48+
kernel_size: 3x3
49+
pad: 1
50+
activate: ReLU
51+
52+
# Layer 4: backbone_conv5
53+
- out_offset: 0x1600 #1600+600 = 1C00
54+
in_offset: 0x0000
55+
processors: 0xffffffffffffffff
56+
output_processors: 0xffffffffffffffff
57+
operation: conv2d
58+
kernel_size: 3x3
59+
pad: 1
60+
activate: ReLU
61+
max_pool: 3
62+
pool_stride: 2
63+
64+
# Layer 5: backbone_conv6
65+
- out_offset: 0x2000 #1600
66+
in_offset: 0x1600
67+
processors: 0xffffffffffffffff
68+
output_processors: 0xffffffffffffffff
69+
operation: conv2d
70+
kernel_size: 3x3
71+
pad: 1
72+
activate: ReLU
73+
74+
# Layer 6: backbone_conv7
75+
- out_offset: 0x4000 #1600
76+
in_offset: 0x2000
77+
processors: 0xffffffffffffffff
78+
output_processors: 0xffffffffffffffff
79+
operation: conv2d
80+
kernel_size: 3x3
81+
pad: 1
82+
activate: ReLU
83+
84+
# Layer 7: backbone_conv8
85+
- name: backbone_conv8
86+
out_offset: 0x1600 #1C00
87+
in_offset: 0x4000
88+
processors: 0xffffffffffffffff
89+
output_processors: 0x00000000ffffffff
90+
operation: conv2d
91+
kernel_size: 3x3
92+
pad: 1
93+
activate: ReLU
94+
95+
# Layer 8: backbone_conv9
96+
- name: backbone_conv9
97+
out_offset: 0x1C00 #1E00
98+
in_offset: 0x1600
99+
processors: 0x00000000ffffffff
100+
output_processors: 0xffffffff00000000
101+
operation: conv2d
102+
kernel_size: 3x3
103+
pad: 1
104+
activate: ReLU
105+
max_pool: 2
106+
pool_stride: 2
107+
108+
# Layer 9: backbone_conv10
109+
- name: backbone_conv10
110+
out_offset: 0x1E00 #1F00
111+
in_offset: 0x1C00
112+
processors: 0xffffffff00000000
113+
output_processors: 0x00000000ffffffff
114+
operation: conv2d
115+
kernel_size: 3x3
116+
pad: 1
117+
activate: ReLU
118+
max_pool: 3
119+
pool_stride: 2
120+
121+
# Layer 10: conv12_1
122+
- name: conv12_1
123+
out_offset: 0x1F00
124+
in_offset: 0x1E00
125+
processors: 0x00000000ffffffff
126+
output_processors: 0x000000000000ffff
127+
operation: conv2d
128+
kernel_size: 3x3
129+
pad: 1
130+
activate: ReLU
131+
132+
# Layer 11: conv12_2
133+
- name: conv12_2
134+
out_offset: 0x2000
135+
in_offset: 0x1F00
136+
processors: 0x000000000000ffff
137+
output_processors: 0x00000000ffff0000
138+
operation: conv2d
139+
kernel_size: 3x3
140+
pad: 1
141+
activate: ReLU
142+
max_pool: 2
143+
pool_stride: 2
144+
145+
# Layer 12: loc_conv8
146+
- out_offset: 0x3000 #510
147+
in_offset: 0x1600
148+
processors: 0x00000000ffffffff
149+
output_processors: 0x000000000000ffff
150+
operation: conv2d
151+
kernel_size: 3x3
152+
pad: 1
153+
in_sequences: backbone_conv8
154+
output: true
155+
156+
# Layer 13: loc_conv9
157+
- out_offset: 0x3510 #144 = 2654
158+
in_offset: 0x1C00
159+
processors: 0xffffffff00000000
160+
output_processors: 0x000000000000ffff
161+
operation: conv2d
162+
kernel_size: 3x3
163+
pad: 1
164+
in_sequences: backbone_conv9
165+
output: true
166+
167+
# Layer 14: loc_conv10
168+
- out_offset: 0x3654
169+
in_offset: 0x1E00
170+
processors: 0x00000000ffffffff
171+
output_processors: 0x000000000000ffff
172+
operation: conv2d
173+
kernel_size: 3x3
174+
pad: 1
175+
in_sequences: backbone_conv10
176+
output: true
177+
178+
# Layer 15: loc_conv12_2
179+
- out_offset: 0x3694
180+
in_offset: 0x2000
181+
processors: 0x00000000ffff0000
182+
output_processors: 0x000000000000ffff
183+
operation: conv2d
184+
kernel_size: 3x3
185+
pad: 1
186+
in_sequences: conv12_2
187+
output: true
188+
189+
# Layer 16: cl_conv8
190+
- out_offset: 0x3000 #510
191+
in_offset: 0x1600
192+
processors: 0x00000000ffffffff
193+
output_processors: 0xfffffffffff00000
194+
operation: conv2d
195+
kernel_size: 3x3
196+
pad: 1
197+
in_sequences: backbone_conv8
198+
output: true
199+
200+
# Layer 17: cl_conv9
201+
- out_offset: 0x3510 #144 = 2654
202+
in_offset: 0x1C00
203+
processors: 0xffffffff00000000
204+
output_processors: 0xfffffffffff00000
205+
operation: conv2d
206+
kernel_size: 3x3
207+
pad: 1
208+
in_sequences: backbone_conv9
209+
output: true
210+
211+
# Layer 18: cl_conv10
212+
- out_offset: 0x3654
213+
in_offset: 0x1E00
214+
processors: 0x00000000ffffffff
215+
output_processors: 0xfffffffffff00000
216+
operation: conv2d
217+
kernel_size: 3x3
218+
pad: 1
219+
in_sequences: backbone_conv10
220+
output: true
221+
222+
# Layer 19: cl_conv12_2
223+
- out_offset: 0x3694
224+
in_offset: 0x2000
225+
processors: 0x00000000ffff0000
226+
output_processors: 0xfffffffffff00000
227+
operation: conv2d
228+
kernel_size: 3x3
229+
pad: 1
230+
in_sequences: conv12_2
231+
output: true

scripts/quantize_svhn_tinierssd.sh

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,2 @@
1+
#!/bin/sh
2+
python quantize.py trained/ai85-svhn-tinierssd-qat8.pth.tar trained/ai85-svhn-tinierssd-qat8-q.pth.tar --device MAX78000 -v

tests/sample_svhn_74.npy

128 KB
Binary file not shown.
3.96 MB
Binary file not shown.

0 commit comments

Comments
 (0)