文档库 最新最全的文档下载
当前位置:文档库 › caffe源码解析 — caffe.proto

caffe源码解析 — caffe.proto

caffe源码解析 — caffe.proto
caffe源码解析 — caffe.proto

要看caffe源码,首先应该看的就是caffe.proto。

它位于…\src\caffe\proto目录下,在这个文件夹下还有一个caffe.pb.cc文件和一个caffe.pb.h文件,这两个文件都是由caffe.proto编译而来的。

在caffe.proto中定义了很多结构化数据,包括:

- BlobProto

- Datum

- FillerParameter

- NetParameter

- SolverParameter

- SolverState

- LayerParameter

- ConcatParameter

- ConvolutionParameter

- DataParameter

- DropoutParameter

- HDF5DataParameter

- HDF5OutputParameter

- ImageDataParameter

- InfogainLossParameter

- InnerProductParameter

- LRNParameter

- MemoryDataParameter

- PoolingParameter

- PowerParameter

- WindowDataParameter

- V0LayerParameter

caffe.proto中的几个重要数据类型

caffe.pb.cc里面的东西都是从caffe.proto编译而来的,无非就是一些关于这些数据结构(类)的标准化操作,比如

void CopyFrom();

void MergeFrom();

void Clear();

bool IsInitialized() const;

int ByteSize() const;

bool MergePartialFromCodedStream();

void SerializeWithCachedSizes() const;

uint8* SerializeWithCachedSizesToArray() const;

int GetCachedSize() const;

void SharedCtor();

void SharedDtor();

void SetCachedSize() const;

<0> BlobProto

// The attributes (shape) of a blob plus the data it carries (data/diff).
message BlobProto {

optional int32 num = 1 [default = 0];

optional int32 channels = 2 [default = 0];

optional int32 height = 3 [default = 0];

optional int32 width = 4 [default = 0];

repeated float data = 5 [packed = true];

repeated float diff = 6 [packed = true];

}

<1> Datum

message Datum {
  optional int32 channels = 1;
  optional int32 height = 2;
  optional int32 width = 3;
  // The actual image data, stored as bytes.
  optional bytes data = 4;
  optional int32 label = 5;
  // A datum can also hold float-typed data.
  repeated float float_data = 6;
}

<2> LayerParameter

message LayerParameter {
  repeated string bottom = 2; // names of the input (bottom) blobs
  repeated string top = 3; // names of the output (top) blobs
  optional string name = 4; // the layer name
  // Enumeration of the layer types (like a C++ enum).
  enum LayerType {
    NONE = 0;
    ACCURACY = 1;
    BNLL = 2;
    CONCAT = 3;
    CONVOLUTION = 4;
    DATA = 5;
    DROPOUT = 6;
    EUCLIDEAN_LOSS = 7;
    ELTWISE_PRODUCT = 25;
    FLATTEN = 8;
    HDF5_DATA = 9;
    HDF5_OUTPUT = 10;
    HINGE_LOSS = 28;
    IM2COL = 11;
    IMAGE_DATA = 12;
    INFOGAIN_LOSS = 13;
    INNER_PRODUCT = 14;
    LRN = 15;
    MEMORY_DATA = 29;
    MULTINOMIAL_LOGISTIC_LOSS = 16;
    POOLING = 17;
    POWER = 26;
    RELU = 18;
    SIGMOID = 19;
    SIGMOID_CROSS_ENTROPY_LOSS = 27;
    SOFTMAX = 20;
    SOFTMAX_LOSS = 21;
    SPLIT = 22;
    TANH = 23;
    WINDOW_DATA = 24;
  }
  optional LayerType type = 5; // the layer type
  repeated BlobProto blobs = 6; // numeric parameter blobs of the layer
  // Per-blob learning-rate multipliers (repeated); to set the rate for
  // one blob you must set it for all blobs.
  repeated float blobs_lr = 7;
  repeated float weight_decay = 8; // per-blob weight decay (repeated)
  // Parameters specific to particular layer types (optional).
  optional ConcatParameter concat_param = 9;
  optional ConvolutionParameter convolution_param = 10;
  optional DataParameter data_param = 11;
  optional DropoutParameter dropout_param = 12;
  optional HDF5DataParameter hdf5_data_param = 13;
  optional HDF5OutputParameter hdf5_output_param = 14;
  optional ImageDataParameter image_data_param = 15;
  optional InfogainLossParameter infogain_loss_param = 16;
  optional InnerProductParameter inner_product_param = 17;
  optional LRNParameter lrn_param = 18;
  optional MemoryDataParameter memory_data_param = 22;
  optional PoolingParameter pooling_param = 19;
  optional PowerParameter power_param = 21;
  optional WindowDataParameter window_data_param = 20;
  optional V0LayerParameter layer = 1; // DEPRECATED legacy parameters
}

<3> NetParameter

message NetParameter {
  optional string name = 1; // the network name
  repeated LayerParameter layers = 2; // repeated acts like an array
  repeated string input = 3; // names of the input blobs
  // Dimensions of the input blobs: should total (4 * #input) values.
  repeated int32 input_dim = 4;
  // Whether the network forces backward computation for every layer.
  // If false, whether to run backward is decided automatically from the
  // net structure and learning rates.
  optional bool force_backward = 5 [default = false];
}

<4> SolverParameter

message SolverParameter {
  optional string train_net = 1; // proto file for the training net
  optional string test_net = 2; // proto file for the testing net
  optional int32 test_iter = 3 [default = 0]; // iterations per test phase
  optional int32 test_interval = 4 [default = 0]; // iterations between two test phases
  optional bool test_compute_loss = 19 [default = false];
  optional float base_lr = 5; // the base learning rate
  optional int32 display = 6; // iterations between displaying info
  optional int32 max_iter = 7; // the maximum number of iterations
  optional string lr_policy = 8; // learning-rate decay policy
  optional float gamma = 9; // a parameter to compute the learning rate
  optional float power = 10; // a parameter to compute the learning rate
  optional float momentum = 11; // the momentum value
  optional float weight_decay = 12; // the weight decay
  optional int32 stepsize = 13; // step size of the learning-rate decay
  optional int32 snapshot = 14 [default = 0]; // the snapshot interval
  optional string snapshot_prefix = 15; // prefix for snapshot files
  optional bool snapshot_diff = 16 [default = false]; // whether to snapshot diff too
  enum SolverMode {
    CPU = 0;
    GPU = 1;
  }
  optional SolverMode solver_mode = 17 [default = GPU]; // solver mode, GPU by default
  optional int32 device_id = 18 [default = 0]; // the GPU device id
  optional int64 random_seed = 20 [default = -1]; // random-number seed
}

caffe.proto源码

// Copyright 2014 BVLC and contributors.

package caffe;

// The shape of a blob (num, channels, height, width) together with its
// numeric contents: the data values and, optionally, the diff values.
message BlobProto {

optional int32 num = 1 [default = 0];

optional int32 channels = 2 [default = 0];

optional int32 height = 3 [default = 0];

optional int32 width = 4 [default = 0];

repeated float data = 5 [packed = true];

repeated float diff = 6 [packed = true];

}

// The BlobProtoVector is simply a way to pass multiple BlobProto
// instances around.
message BlobProtoVector {

repeated BlobProto blobs = 1;

}

// A single datum: dimensions, raw byte data (the actual image data),
// an optional label, and optionally float-typed data.
message Datum {

optional int32 channels = 1;

optional int32 height = 2;

optional int32 width = 3;

// the actual image data, in bytes
optional bytes data = 4;

optional int32 label = 5;

// Optionally, the datum could also hold float data.
repeated float float_data = 6;

}

// Parameters controlling how a blob is filled with initial values.
message FillerParameter {

// The filler type.
optional string type = 1 [default = 'constant'];

optional float value = 2 [default = 0]; // the value in constant filler

optional float min = 3 [default = 0]; // the min value in uniform filler

optional float max = 4 [default = 1]; // the max value in uniform filler

optional float mean = 5 [default = 0]; // the mean value in Gaussian filler

optional float std = 6 [default = 1]; // the std value in Gaussian filler

// The expected number of non-zero input weights for a given output in
// Gaussian filler -- the default -1 means don't perform sparsification.
optional int32 sparse = 7 [default = -1];

}

message NetParameter {
  optional string name = 1; // consider giving the network a name
  repeated LayerParameter layers = 2; // a bunch of layers.
  // The input blobs to the network.
  repeated string input = 3;
  // The dim of the input blobs. For each input blob there should be four
  // values specifying the num, channels, height and width of the input
  // blob. Thus, there should be a total of (4 * #input) numbers.
  repeated int32 input_dim = 4;
  // Whether the network will force every layer to carry out backward
  // operation. If set False, then whether to carry out backward is
  // determined automatically according to the net structure and learning
  // rates.
  optional bool force_backward = 5 [default = false];
}

message SolverParameter {
  optional string train_net = 1; // The proto file for the training net.
  optional string test_net = 2; // The proto file for the testing net.
  // The number of iterations for each testing phase.
  optional int32 test_iter = 3 [default = 0];
  // The number of iterations between two testing phases.
  optional int32 test_interval = 4 [default = 0];
  optional bool test_compute_loss = 19 [default = false];
  optional float base_lr = 5; // The base learning rate
  // the number of iterations between displaying info. If display = 0,
  // no info will be displayed.
  optional int32 display = 6;
  optional int32 max_iter = 7; // the maximum number of iterations
  optional string lr_policy = 8; // The learning rate decay policy.
  optional float gamma = 9; // The parameter to compute the learning rate.
  optional float power = 10; // The parameter to compute the learning rate.
  optional float momentum = 11; // The momentum value.
  optional float weight_decay = 12; // The weight decay.
  optional int32 stepsize = 13; // the stepsize for learning rate policy "step"
  optional int32 snapshot = 14 [default = 0]; // The snapshot interval
  optional string snapshot_prefix = 15; // The prefix for the snapshot.
  // whether to snapshot diff in the results or not. Snapshotting diff
  // will help debugging but the final protocol buffer size will be much
  // larger.
  optional bool snapshot_diff = 16 [default = false];
  // the mode solver will use: 0 for CPU and 1 for GPU. Use GPU in default.
  enum SolverMode {
    CPU = 0;
    GPU = 1;
  }
  optional SolverMode solver_mode = 17 [default = GPU];
  // the device_id that will be used in GPU mode. Use device_id = 0 in
  // default.
  optional int32 device_id = 18 [default = 0];
  // If non-negative, the seed with which the Solver will initialize the
  // Caffe random number generator -- useful for reproducible results.
  // Otherwise, (and by default) initialize using a seed derived from the
  // system clock.
  optional int64 random_seed = 20 [default = -1];
}

// A message that stores the solver snapshots
message SolverState {

optional int32 iter = 1; // The current iteration

optional string learned_net = 2; // The file that stores the learned net.

repeated BlobProto history = 3; // The history for sgd solvers

}

// Update the next available ID when you add a new LayerParameter field.
//
// LayerParameter next available ID: 23 (last added: memory_data_param)
message LayerParameter {
  repeated string bottom = 2; // the name of the bottom blobs
  repeated string top = 3; // the name of the top blobs
  optional string name = 4; // the layer name
  // Add new LayerTypes to the enum below in lexicographical order (other
  // than starting with NONE), starting with the next available ID in the
  // comment line above the enum. Update the next available ID when you
  // add a new LayerType.
  //
  // LayerType next available ID: 30 (last added: MEMORY_DATA)
  enum LayerType {
    // "NONE" layer type is 0th enum element so that we don't cause
    // confusion by defaulting to an existent LayerType (instead, should
    // usually error if the type is unspecified).
    NONE = 0;
    ACCURACY = 1;
    BNLL = 2;
    CONCAT = 3;
    CONVOLUTION = 4;
    DATA = 5;
    DROPOUT = 6;
    EUCLIDEAN_LOSS = 7;
    ELTWISE_PRODUCT = 25;
    FLATTEN = 8;
    HDF5_DATA = 9;
    HDF5_OUTPUT = 10;
    HINGE_LOSS = 28;
    IM2COL = 11;
    IMAGE_DATA = 12;
    INFOGAIN_LOSS = 13;
    INNER_PRODUCT = 14;
    LRN = 15;
    MEMORY_DATA = 29;
    MULTINOMIAL_LOGISTIC_LOSS = 16;
    POOLING = 17;
    POWER = 26;
    RELU = 18;
    SIGMOID = 19;
    SIGMOID_CROSS_ENTROPY_LOSS = 27;
    SOFTMAX = 20;
    SOFTMAX_LOSS = 21;
    SPLIT = 22;
    TANH = 23;
    WINDOW_DATA = 24;
  }
  optional LayerType type = 5; // the layer type from the enum above
  // The blobs containing the numeric parameters of the layer
  repeated BlobProto blobs = 6;
  // The ratio that is multiplied on the global learning rate. If you
  // want to set the learning ratio for one blob, you need to set it for
  // all blobs.
  repeated float blobs_lr = 7;
  // The weight decay that is multiplied on the global weight decay.
  repeated float weight_decay = 8;
  // Parameters for particular layer types.
  optional ConcatParameter concat_param = 9;
  optional ConvolutionParameter convolution_param = 10;
  optional DataParameter data_param = 11;
  optional DropoutParameter dropout_param = 12;
  optional HDF5DataParameter hdf5_data_param = 13;
  optional HDF5OutputParameter hdf5_output_param = 14;
  optional ImageDataParameter image_data_param = 15;
  optional InfogainLossParameter infogain_loss_param = 16;
  optional InnerProductParameter inner_product_param = 17;
  optional LRNParameter lrn_param = 18;
  optional MemoryDataParameter memory_data_param = 22;
  optional PoolingParameter pooling_param = 19;
  optional PowerParameter power_param = 21;
  optional WindowDataParameter window_data_param = 20;
  // DEPRECATED: The layer parameters specified as a V0LayerParameter.
  // This should never be used by any code except to upgrade to the new
  // LayerParameter specification.
  optional V0LayerParameter layer = 1;
}

// Message that stores parameters used by ConcatLayer
message ConcatParameter {
  // Concat Layer needs to specify the dimension along which the concat
  // will happen; the other dimensions must be the same for all the
  // bottom blobs. By default it will concatenate blobs along the
  // channels dimension.
  optional uint32 concat_dim = 1 [default = 1];
}

// Message that stores parameters used by ConvolutionLayer
message ConvolutionParameter {
  optional uint32 num_output = 1; // The number of outputs for the layer
  optional bool bias_term = 2 [default = true]; // whether to have bias terms
  optional uint32 pad = 3 [default = 0]; // The padding size
  optional uint32 kernel_size = 4; // The kernel size
  optional uint32 group = 5 [default = 1]; // The group size for group conv
  optional uint32 stride = 6 [default = 1]; // The stride
  optional FillerParameter weight_filler = 7; // The filler for the weight
  optional FillerParameter bias_filler = 8; // The filler for the bias
}

// Message that stores parameters used by DataLayer
message DataParameter {
  // Specify the data source.
  optional string source = 1;
  // For data pre-processing, we can do simple scaling and subtracting
  // the data mean, if provided. Note that the mean subtraction is always
  // carried out before scaling.
  optional float scale = 2 [default = 1];
  optional string mean_file = 3;
  // Specify the batch size.
  optional uint32 batch_size = 4;
  // Specify if we would like to randomly crop an image.
  optional uint32 crop_size = 5 [default = 0];
  // Specify if we want to randomly mirror data.
  optional bool mirror = 6 [default = false];
  // The rand_skip variable is for the data layer to skip a few data
  // points to avoid all asynchronous sgd clients to start at the same
  // point. The skip point would be set as rand_skip * rand(0,1). Note
  // that rand_skip should not be larger than the number of keys in the
  // leveldb.
  optional uint32 rand_skip = 7 [default = 0];
}

// Message that stores parameters used by DropoutLayer
message DropoutParameter {
  optional float dropout_ratio = 1 [default = 0.5]; // dropout ratio
}

// Message that stores parameters used by HDF5DataLayer
message HDF5DataParameter {
  // Specify the data source.
  optional string source = 1;
  // Specify the batch size.
  optional uint32 batch_size = 2;
}

// Message that stores parameters used by HDF5OutputLayer
message HDF5OutputParameter {
  optional string file_name = 1;
}

// Message that stores parameters used by ImageDataLayer
message ImageDataParameter {
  // Specify the data source.
  optional string source = 1;
  // For data pre-processing, we can do simple scaling and subtracting
  // the data mean, if provided. Note that the mean subtraction is always
  // carried out before scaling.
  optional float scale = 2 [default = 1];
  optional string mean_file = 3;
  // Specify the batch size.
  optional uint32 batch_size = 4;
  // Specify if we would like to randomly crop an image.
  optional uint32 crop_size = 5 [default = 0];
  // Specify if we want to randomly mirror data.
  optional bool mirror = 6 [default = false];
  // The rand_skip variable is for the data layer to skip a few data
  // points to avoid all asynchronous sgd clients to start at the same
  // point. The skip point would be set as rand_skip * rand(0,1). Note
  // that rand_skip should not be larger than the number of keys in the
  // leveldb.
  optional uint32 rand_skip = 7 [default = 0];
  // Whether or not ImageLayer should shuffle the list of files at every
  // epoch.
  optional bool shuffle = 8 [default = false];
  // It will also resize images if new_height or new_width are not zero.
  optional uint32 new_height = 9 [default = 0];
  optional uint32 new_width = 10 [default = 0];
}

// Message that stores parameters used by InfogainLossLayer
message InfogainLossParameter {
  // Specify the infogain matrix source.
  optional string source = 1;
}

// Message that stores parameters used by InnerProductLayer
message InnerProductParameter {

optional uint32 num_output = 1; // The number of outputs for the layer

optional bool bias_term = 2 [default = true]; // whether to have bias terms

optional FillerParameter weight_filler = 3; // The filler for the weight

optional FillerParameter bias_filler = 4; // The filler for the bias

}

// Message that stores parameters used by LRNLayer
message LRNParameter {
  optional uint32 local_size = 1 [default = 5];
  optional float alpha = 2 [default = 1.];
  optional float beta = 3 [default = 0.75];
  enum NormRegion {
    ACROSS_CHANNELS = 0;
    WITHIN_CHANNEL = 1;
  }
  optional NormRegion norm_region = 4 [default = ACROSS_CHANNELS];
}

// Message that stores parameters used by MemoryDataLayer
message MemoryDataParameter {
  optional uint32 batch_size = 1;
  optional uint32 channels = 2;
  optional uint32 height = 3;
  optional uint32 width = 4;
}

// Message that stores parameters used by PoolingLayer
message PoolingParameter {
  enum PoolMethod {
    MAX = 0;
    AVE = 1;
    STOCHASTIC = 2;
  }
  optional PoolMethod pool = 1 [default = MAX]; // The pooling method
  optional uint32 kernel_size = 2; // The kernel size
  optional uint32 stride = 3 [default = 1]; // The stride
  // The padding size -- currently implemented only for average pooling.
  optional uint32 pad = 4 [default = 0];
}

// Message that stores parameters used by PowerLayer
message PowerParameter {
  // PowerLayer computes outputs y = (shift + scale * x) ^ power.
  optional float power = 1 [default = 1.0];
  optional float scale = 2 [default = 1.0];
  optional float shift = 3 [default = 0.0];
}

// Message that stores parameters used by WindowDataLayer
message WindowDataParameter {
  // Specify the data source.
  optional string source = 1;
  // For data pre-processing, we can do simple scaling and subtracting
  // the data mean, if provided. Note that the mean subtraction is always
  // carried out before scaling.
  optional float scale = 2 [default = 1];
  optional string mean_file = 3;
  // Specify the batch size.
  optional uint32 batch_size = 4;
  // Specify if we would like to randomly crop an image.
  optional uint32 crop_size = 5 [default = 0];
  // Specify if we want to randomly mirror data.
  optional bool mirror = 6 [default = false];
  // Foreground (object) overlap threshold
  optional float fg_threshold = 7 [default = 0.5];
  // Background (non-object) overlap threshold
  optional float bg_threshold = 8 [default = 0.5];
  // Fraction of batch that should be foreground objects
  optional float fg_fraction = 9 [default = 0.25];
  // Amount of contextual padding to add around a window
  // (used only by the window_data_layer)
  optional uint32 context_pad = 10 [default = 0];
  // Mode for cropping out a detection window
  // warp: cropped window is warped to a fixed size and aspect ratio
  // square: the tightest square around the window is cropped
  optional string crop_mode = 11 [default = "warp"];
}

// DEPRECATED: V0LayerParameter is the old way of specifying layer
// parameters in Caffe. We keep this message type around for legacy
// support.
message V0LayerParameter {
  optional string name = 1; // the layer name
  optional string type = 2; // the string to specify the layer type
  // Parameters to specify layers with inner products.
  optional uint32 num_output = 3; // The number of outputs for the layer
  optional bool biasterm = 4 [default = true]; // whether to have bias terms
  optional FillerParameter weight_filler = 5; // The filler for the weight
  optional FillerParameter bias_filler = 6; // The filler for the bias
  optional uint32 pad = 7 [default = 0]; // The padding size
  optional uint32 kernelsize = 8; // The kernel size
  optional uint32 group = 9 [default = 1]; // The group size for group conv
  optional uint32 stride = 10 [default = 1]; // The stride
  enum PoolMethod {
    MAX = 0;
    AVE = 1;
    STOCHASTIC = 2;
  }
  optional PoolMethod pool = 11 [default = MAX]; // The pooling method
  optional float dropout_ratio = 12 [default = 0.5]; // dropout ratio
  optional uint32 local_size = 13 [default = 5]; // for local response norm
  optional float alpha = 14 [default = 1.]; // for local response norm
  optional float beta = 15 [default = 0.75]; // for local response norm
  // For data layers, specify the data source
  optional string source = 16;
  // For data pre-processing, we can do simple scaling and subtracting
  // the data mean, if provided. Note that the mean subtraction is always
  // carried out before scaling.
  optional float scale = 17 [default = 1];
  optional string meanfile = 18;
  // For data layers, specify the batch size.
  optional uint32 batchsize = 19;
  // For data layers, specify if we would like to randomly crop an image.
  optional uint32 cropsize = 20 [default = 0];
  // For data layers, specify if we want to randomly mirror data.
  optional bool mirror = 21 [default = false];
  // The blobs containing the numeric parameters of the layer
  repeated BlobProto blobs = 50;
  // The ratio that is multiplied on the global learning rate. If you
  // want to set the learning ratio for one blob, you need to set it for
  // all blobs.
  repeated float blobs_lr = 51;
  // The weight decay that is multiplied on the global weight decay.
  repeated float weight_decay = 52;
  // The rand_skip variable is for the data layer to skip a few data
  // points to avoid all asynchronous sgd clients to start at the same
  // point. The skip point would be set as rand_skip * rand(0,1). Note
  // that rand_skip should not be larger than the number of keys in the
  // leveldb.
  optional uint32 rand_skip = 53 [default = 0];
  // Fields related to detection (det_*)
  // foreground (object) overlap threshold
  optional float det_fg_threshold = 54 [default = 0.5];
  // background (non-object) overlap threshold
  optional float det_bg_threshold = 55 [default = 0.5];
  // Fraction of batch that should be foreground objects
  optional float det_fg_fraction = 56 [default = 0.25];
  // optional bool OBSOLETE_can_clobber = 57 [default = true];
  // Amount of contextual padding to add around a window
  // (used only by the window_data_layer)
  optional uint32 det_context_pad = 58 [default = 0];
  // Mode for cropping out a detection window
  // warp: cropped window is warped to a fixed size and aspect ratio
  // square: the tightest square around the window is cropped
  optional string det_crop_mode = 59 [default = "warp"];
  // For ReshapeLayer, one needs to specify the new dimensions.
  optional int32 new_num = 60 [default = 0];
  optional int32 new_channels = 61 [default = 0];
  optional int32 new_height = 62 [default = 0];
  optional int32 new_width = 63 [default = 0];
  // Whether or not ImageLayer should shuffle the list of files at every
  // epoch. It will also resize images if new_height or new_width are not
  // zero.
  optional bool shuffle_images = 64 [default = false];
  // For ConcatLayer, one needs to specify the dimension for
  // concatenation, and the other dimensions must be the same for all the
  // bottom blobs. By default it will concatenate blobs along the
  // channels dimension.
  optional uint32 concat_dim = 65 [default = 1];
  optional HDF5OutputParameter hdf5_output_param = 1001;
}

相关文档