
Commit 5d0d83f

Author: feiyuxiao
Commit message: modify the processor
1 parent 3f7b835 commit 5d0d83f

14 files changed: +584 -582 lines

Code/Processor/ResNet/README.md

+9
@@ -0,0 +1,9 @@
+# ReadMe
+This module is the regressor of the whole regression system. It is divided into four parts: data processing, ResNet construction, network training, and model prediction.
+The main functions of the module are:
+
+1. Load all of the cleaned data from the local directory into memory in the format ResNet expects.
+2. Automatically split the data into training data and prediction data; the training data is fed into ResNet for model training.
+3. Output the prediction results of the trained model.
+4. Support resuming training from a checkpoint: during training the model is saved every 10 epochs, and you can freely jump back to any saved model to continue training or to export predictions (see the sketch below this hunk).
+5. Support transfer learning, i.e. using an already trained model to train a new scenario, which improves training efficiency and reduces the number of samples required.
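Item 4 of the README describes periodic checkpointing with resume. The training module itself is not part of this diff, so what follows is only a minimal TensorFlow 1.x sketch of that save-every-10-epochs / restore-latest behavior; MODEL_SAVE_PATH, MODEL_NAME, and the ops passed in are assumed names, not identifiers from this repository.

# Editorial sketch of the checkpoint/resume behavior from the README;
# not code from this commit. MODEL_SAVE_PATH and MODEL_NAME are assumed.
import os
import tensorflow as tf

MODEL_SAVE_PATH = './model/'   # assumed checkpoint directory
MODEL_NAME = 'resnet_model'    # assumed checkpoint file prefix
SAVE_EVERY = 10                # per the README: save the model every 10 epochs

def train(epochs, train_op, loss, global_step):
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # Resume from the most recent saved model, if one exists.
        ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        for epoch in range(epochs):
            # Assumes the graph is fed by an input pipeline (e.g. tf.data).
            _, loss_value = sess.run([train_op, loss])
            if epoch % SAVE_EVERY == 0:
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME),
                           global_step=global_step)

The transfer-learning workflow of item 5 would use the same restore path: load a checkpoint trained on one scenario, then continue training on data from the new one.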
@@ -1,171 +1,171 @@
'''
This module is the forward-propagation network; it defines the network structure.
'''
import tensorflow as tf

CONV_SIZE = 5  # convolution kernel size

# Number of output channels for each layer
CONV1_KERNEL_NUM = 64
CONV2_KERNEL_NUM = 64
CONV3_KERNEL_NUM = 64
CONV4_KERNEL_NUM = 64
CONV5_KERNEL_NUM = 64
CONV6_KERNEL_NUM = 64
CONV7_KERNEL_NUM = 64
CONV8_KERNEL_NUM = 64
CONV9_KERNEL_NUM = 64
CONV10_KERNEL_NUM = 64
CONV11_KERNEL_NUM = 64
CONV12_KERNEL_NUM = 1

# Create a convolution kernel (the weights w)
def get_weight(shape, regularizer=None):
    '''Create a convolution kernel of the given shape; the weights w are drawn from a truncated normal distribution.'''
    w = tf.Variable(tf.truncated_normal(shape, stddev=0.01))
    if regularizer is not None:
        tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(regularizer)(w))
    return w

# Create a bias term
def get_bias(shape):
    '''Initialize the bias term b to zeros.'''
    b = tf.Variable(tf.zeros(shape))
    return b

def conv2d(x, w):
    '''
    Define the convolution operation.
    x: input image
    w: convolution kernel
    strides: stride of 1 in every direction
    padding: 'SAME' zero padding, so the output keeps the same spatial size as the input
    '''
    return tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')


# Define the forward-propagation network, following the ResNet of the paper
def forward(x, channel, regularizer=None):

    # Convolution layer 1
    conv1_w = get_weight([CONV_SIZE, CONV_SIZE, channel, CONV1_KERNEL_NUM], regularizer)
    conv1_b = get_bias([CONV1_KERNEL_NUM])
    conv1 = conv2d(x, conv1_w)
    conv1_op = tf.nn.bias_add(conv1, conv1_b)

    # Activation 1
    relu1 = tf.nn.relu(conv1_op)

    # Convolution layer 2
    conv2_w = get_weight([CONV_SIZE, CONV_SIZE, CONV1_KERNEL_NUM, CONV2_KERNEL_NUM], regularizer)
    conv2_b = get_bias([CONV2_KERNEL_NUM])
    conv2 = conv2d(relu1, conv2_w)
    conv2_op = tf.nn.bias_add(conv2, conv2_b)

    # Activation 2
    relu2 = tf.nn.relu(conv2_op)

    # Convolution layer 3
    conv3_w = get_weight([CONV_SIZE, CONV_SIZE, CONV2_KERNEL_NUM, CONV3_KERNEL_NUM], regularizer)
    conv3_b = get_bias([CONV3_KERNEL_NUM])
    conv3 = conv2d(relu2, conv3_w)
    conv3_op = tf.nn.bias_add(conv3, conv3_b)

    # Skip-connection add 1
    add1 = tf.add(conv3_op, conv1_op)

    # Activation 3
    relu3 = tf.nn.relu(add1)

    # Convolution layer 4
    conv4_w = get_weight([CONV_SIZE, CONV_SIZE, CONV3_KERNEL_NUM, CONV4_KERNEL_NUM], regularizer)
    conv4_b = get_bias([CONV4_KERNEL_NUM])
    conv4 = conv2d(relu3, conv4_w)
    conv4_op = tf.nn.bias_add(conv4, conv4_b)

    # Activation 4
    relu4 = tf.nn.relu(conv4_op)

    # Convolution layer 5
    conv5_w = get_weight([CONV_SIZE, CONV_SIZE, CONV4_KERNEL_NUM, CONV5_KERNEL_NUM], regularizer)
    conv5_b = get_bias([CONV5_KERNEL_NUM])
    conv5 = conv2d(relu4, conv5_w)
    conv5_op = tf.nn.bias_add(conv5, conv5_b)

    # Skip-connection add 2
    add2 = tf.add(conv5_op, add1)

    # Activation 5
    relu5 = tf.nn.relu(add2)

    # Convolution layer 6
    conv6_w = get_weight([CONV_SIZE, CONV_SIZE, CONV5_KERNEL_NUM, CONV6_KERNEL_NUM], regularizer)
    conv6_b = get_bias([CONV6_KERNEL_NUM])
    conv6 = conv2d(relu5, conv6_w)
    conv6_op = tf.nn.bias_add(conv6, conv6_b)

    # Activation 6
    relu6 = tf.nn.relu(conv6_op)

    # Convolution layer 7
    conv7_w = get_weight([CONV_SIZE, CONV_SIZE, CONV6_KERNEL_NUM, CONV7_KERNEL_NUM], regularizer)
    conv7_b = get_bias([CONV7_KERNEL_NUM])
    conv7 = conv2d(relu6, conv7_w)
    conv7_op = tf.nn.bias_add(conv7, conv7_b)

    # Skip-connection add 3
    add3 = tf.add(conv7_op, add2)

    # Activation 7
    relu7 = tf.nn.relu(add3)

    # Convolution layer 8
    conv8_w = get_weight([CONV_SIZE, CONV_SIZE, CONV7_KERNEL_NUM, CONV8_KERNEL_NUM], regularizer)
    conv8_b = get_bias([CONV8_KERNEL_NUM])
    conv8 = conv2d(relu7, conv8_w)
    conv8_op = tf.nn.bias_add(conv8, conv8_b)

    # Activation 8
    relu8 = tf.nn.relu(conv8_op)

    # Convolution layer 9
    conv9_w = get_weight([CONV_SIZE, CONV_SIZE, CONV8_KERNEL_NUM, CONV9_KERNEL_NUM], regularizer)
    conv9_b = get_bias([CONV9_KERNEL_NUM])
    conv9 = conv2d(relu8, conv9_w)
    conv9_op = tf.nn.bias_add(conv9, conv9_b)

    # Skip-connection add 4
    add4 = tf.add(conv9_op, add3)

    # Activation 9
    relu9 = tf.nn.relu(add4)

    # Convolution layer 10
    conv10_w = get_weight([CONV_SIZE, CONV_SIZE, CONV9_KERNEL_NUM, CONV10_KERNEL_NUM], regularizer)
    conv10_b = get_bias([CONV10_KERNEL_NUM])
    conv10 = conv2d(relu9, conv10_w)
    conv10_op = tf.nn.bias_add(conv10, conv10_b)

    # Activation 10
    relu10 = tf.nn.relu(conv10_op)

    # Convolution layer 11
    conv11_w = get_weight([CONV_SIZE, CONV_SIZE, CONV10_KERNEL_NUM, CONV11_KERNEL_NUM], regularizer)
    conv11_b = get_bias([CONV11_KERNEL_NUM])
    conv11 = conv2d(relu10, conv11_w)
    conv11_op = tf.nn.bias_add(conv11, conv11_b)

    # Skip-connection add 5
    add5 = tf.add(conv11_op, add4)

    # Activation 11
    relu11 = tf.nn.relu(add5)

    # Convolution layer 12 (single output channel, no activation)
    conv12_w = get_weight([CONV_SIZE, CONV_SIZE, CONV11_KERNEL_NUM, CONV12_KERNEL_NUM], regularizer)
    conv12_b = get_bias([CONV12_KERNEL_NUM])
    conv12 = conv2d(relu11, conv12_w)
    conv12_op = tf.nn.bias_add(conv12, conv12_b)

    # Output
    return conv12_op
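The twelve explicit layers above follow a regular pattern: an input convolution, five residual blocks of two 5x5, 64-channel convolutions each (every skip connection adds onto the previous running sum), and a final 1-channel convolution with no activation. For reference, here is a behavior-equivalent loop-based formulation using the helpers defined above; this is an editorial sketch, not code from the commit.

# Editorial, loop-based equivalent of forward() above; relies on the
# get_weight / get_bias / conv2d helpers and constants defined in the module.
def forward_loop(x, channel, regularizer=None, num_blocks=5):
    # Input convolution (conv1) + ReLU; conv1_op seeds the first skip sum.
    w = get_weight([CONV_SIZE, CONV_SIZE, channel, CONV1_KERNEL_NUM], regularizer)
    skip = tf.nn.bias_add(conv2d(x, w), get_bias([CONV1_KERNEL_NUM]))
    act = tf.nn.relu(skip)
    # Five residual blocks: conv -> ReLU -> conv -> add skip -> ReLU.
    for _ in range(num_blocks):
        w1 = get_weight([CONV_SIZE, CONV_SIZE, 64, 64], regularizer)
        h = tf.nn.relu(tf.nn.bias_add(conv2d(act, w1), get_bias([64])))
        w2 = get_weight([CONV_SIZE, CONV_SIZE, 64, 64], regularizer)
        out = tf.nn.bias_add(conv2d(h, w2), get_bias([64]))
        skip = tf.add(out, skip)   # add_i: sum with the previous running sum
        act = tf.nn.relu(skip)
    # Final 1-channel convolution (conv12), no activation.
    w = get_weight([CONV_SIZE, CONV_SIZE, 64, CONV12_KERNEL_NUM], regularizer)
    return tf.nn.bias_add(conv2d(act, w), get_bias([CONV12_KERNEL_NUM]))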
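How forward() is called from the training and prediction modules is not shown in this diff, so the following usage sketch is an assumption: the module name, input geometry (IMG_SIZE, CHANNELS), and the mean-squared-error loss are illustrative. The only repo-specific detail it relies on is that get_weight() adds L2 terms to the 'losses' collection when a regularizer is supplied.

# Hypothetical TF 1.x usage of forward() for regression; module name,
# shapes, and the MSE loss are assumptions, not taken from the diff.
import tensorflow as tf
import forward  # assumed module name for the file listed above

IMG_SIZE, CHANNELS = 64, 1   # assumed input geometry
x = tf.placeholder(tf.float32, [None, IMG_SIZE, IMG_SIZE, CHANNELS])
y_ = tf.placeholder(tf.float32, [None, IMG_SIZE, IMG_SIZE, 1])

# SAME padding with stride 1 keeps spatial size, so y matches y_.
y = forward.forward(x, CHANNELS, regularizer=0.0001)

# MSE plus the L2 terms that get_weight() pushed into the 'losses'
# collection because a regularizer was passed.
mse = tf.reduce_mean(tf.square(y - y_))
loss = mse + tf.add_n(tf.get_collection('losses'))
train_op = tf.train.AdamOptimizer(1e-4).minimize(loss)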
