
caffe python layer

 mscdj 2016-10-13

Most of Caffe's layers are written in C++, and thanks to the efficiency of C++ a network can be trained quickly. But sometimes we need to write our own input layer to handle a particular kind of input, for example when you want to sample patches from images and don't feel like converting everything to LMDB; in that case, consider writing the layer directly in Python. An input layer doesn't need GPU acceleration either, so it is fairly easy to write.

How to use a Python layer

Let's first look at an example from the web (from http://chrischoy./research/caffe-python-layer/):

layer {
  type: 'Python'
  name: 'loss'
  top: 'loss'
  bottom: 'ipx'
  bottom: 'ipy'
  python_param {
    # the module name -- usually the filename -- that needs to be in $PYTHONPATH
    module: 'pyloss'
    # the layer name -- the class name in the module
    layer: 'EuclideanLossLayer'
  }
  # set loss weight so Caffe knows this is a loss layer
  loss_weight: 1
}

Here type is always Python; top and bottom work just like in ordinary layers. module is the name of your Python module, usually the file name (it must be on $PYTHONPATH), and layer is the name of the class defined in that module. Note that Caffe must be built with the Python layer enabled (uncomment WITH_PYTHON_LAYER := 1 in Makefile.config) before such layers can be used.

How to write a Python layer

As an example of how to write a Python layer, let's walk through the code released with the paper Fully Convolutional Networks for Semantic Segmentation.

import caffe

import numpy as np
from PIL import Image

import random


class VOCSegDataLayer(caffe.Layer):
    """
    Load (input image, label image) pairs from PASCAL VOC
    one-at-a-time while reshaping the net to preserve dimensions.

    Use this to feed data to a fully convolutional network.
    """

    def setup(self, bottom, top):
        """
        Setup data layer according to parameters:

        - voc_dir: path to PASCAL VOC year dir
        - split: train / val / test
        - mean: tuple of mean values to subtract
        - randomize: load in random order (default: True)
        - seed: seed for randomization (default: None / current time)

        for PASCAL VOC semantic segmentation.

        example

        params = dict(voc_dir="/path/to/PASCAL/VOC2011",
            mean=(104.00698793, 116.66876762, 122.67891434),
            split="val")
        """
        # config
        params = eval(self.param_str)
        self.voc_dir = params['voc_dir']
        self.split = params['split']
        self.mean = np.array(params['mean'])
        self.random = params.get('randomize', True)
        self.seed = params.get('seed', None)

        # two tops: data and label
        if len(top) != 2:
            raise Exception("Need to define two tops: data and label.")
        # data layers have no bottoms
        if len(bottom) != 0:
            raise Exception("Do not define a bottom.")

        # load indices for images and labels
        split_f = '{}/ImageSets/Segmentation/{}.txt'.format(self.voc_dir,
                self.split)
        self.indices = open(split_f, 'r').read().splitlines()
        self.idx = 0

        # make eval deterministic
        if 'train' not in self.split:
            self.random = False

        # randomization: seed and pick
        if self.random:
            random.seed(self.seed)
            self.idx = random.randint(0, len(self.indices)-1)

    def reshape(self, bottom, top):
        # load image + label image pair
        self.data = self.load_image(self.indices[self.idx])
        self.label = self.load_label(self.indices[self.idx])
        # reshape tops to fit (leading 1 is for batch dimension)
        top[0].reshape(1, *self.data.shape)
        top[1].reshape(1, *self.label.shape)

    def forward(self, bottom, top):
        # assign output
        top[0].data[...] = self.data
        top[1].data[...] = self.label

        # pick next input
        if self.random:
            self.idx = random.randint(0, len(self.indices)-1)
        else:
            self.idx += 1
            if self.idx == len(self.indices):
                self.idx = 0

    def backward(self, top, propagate_down, bottom):
        pass

    def load_image(self, idx):
        """
        Load input image and preprocess for Caffe:
        - cast to float
        - switch channels RGB -> BGR
        - subtract mean
        - transpose to channel x height x width order
        """
        im = Image.open('{}/JPEGImages/{}.jpg'.format(self.voc_dir, idx))
        in_ = np.array(im, dtype=np.float32)
        in_ = in_[:, :, ::-1]
        in_ -= self.mean
        in_ = in_.transpose((2, 0, 1))
        return in_

    def load_label(self, idx):
        """
        Load label image as 1 x height x width integer array of label indices.
        The leading singleton dimension is required by the loss.
        """
        im = Image.open('{}/SegmentationClass/{}.png'.format(self.voc_dir, idx))
        label = np.array(im, dtype=np.uint8)
        label = label[np.newaxis, ...]
        return label


class SBDDSegDataLayer(caffe.Layer):
    """
    Load (input image, label image) pairs from the SBDD extended labeling
    of PASCAL VOC for semantic segmentation
    one-at-a-time while reshaping the net to preserve dimensions.

    Use this to feed data to a fully convolutional network.
    """

    def setup(self, bottom, top):
        """
        Setup data layer according to parameters:

        - sbdd_dir: path to SBDD `dataset` dir
        - split: train / seg11valid
        - mean: tuple of mean values to subtract
        - randomize: load in random order (default: True)
        - seed: seed for randomization (default: None / current time)

        for SBDD semantic segmentation.

        N.B. seg11valid is the set of segval11 that does not intersect with SBDD.
        Find it here: https://gist.github.com/shelhamer/edb330760338892d511e.

        example

        params = dict(sbdd_dir="/path/to/SBDD/dataset",
            mean=(104.00698793, 116.66876762, 122.67891434),
            split="valid")
        """
        # config
        params = eval(self.param_str)
        self.sbdd_dir = params['sbdd_dir']
        self.split = params['split']
        self.mean = np.array(params['mean'])
        self.random = params.get('randomize', True)
        self.seed = params.get('seed', None)

        # two tops: data and label
        if len(top) != 2:
            raise Exception("Need to define two tops: data and label.")
        # data layers have no bottoms
        if len(bottom) != 0:
            raise Exception("Do not define a bottom.")

        # load indices for images and labels
        split_f = '{}/{}.txt'.format(self.sbdd_dir,
                self.split)
        self.indices = open(split_f, 'r').read().splitlines()
        self.idx = 0

        # make eval deterministic
        if 'train' not in self.split:
            self.random = False

        # randomization: seed and pick
        if self.random:
            random.seed(self.seed)
            self.idx = random.randint(0, len(self.indices)-1)

    def reshape(self, bottom, top):
        # load image + label image pair
        self.data = self.load_image(self.indices[self.idx])
        self.label = self.load_label(self.indices[self.idx])
        # reshape tops to fit (leading 1 is for batch dimension)
        top[0].reshape(1, *self.data.shape)
        top[1].reshape(1, *self.label.shape)

    def forward(self, bottom, top):
        # assign output
        top[0].data[...] = self.data
        top[1].data[...] = self.label

        # pick next input
        if self.random:
            self.idx = random.randint(0, len(self.indices)-1)
        else:
            self.idx += 1
            if self.idx == len(self.indices):
                self.idx = 0

    def backward(self, top, propagate_down, bottom):
        pass

    def load_image(self, idx):
        """
        Load input image and preprocess for Caffe:
        - cast to float
        - switch channels RGB -> BGR
        - subtract mean
        - transpose to channel x height x width order
        """
        im = Image.open('{}/img/{}.jpg'.format(self.sbdd_dir, idx))
        in_ = np.array(im, dtype=np.float32)
        in_ = in_[:, :, ::-1]
        in_ -= self.mean
        in_ = in_.transpose((2, 0, 1))
        return in_

    def load_label(self, idx):
        """
        Load label image as 1 x height x width integer array of label indices.
        The leading singleton dimension is required by the loss.
        """
        import scipy.io
        mat = scipy.io.loadmat('{}/cls/{}.mat'.format(self.sbdd_dir, idx))
        label = mat['GTcls'][0]['Segmentation'][0].astype(np.uint8)
        label = label[np.newaxis, ...]
        return label

Each class is a layer, and the class name is what goes into the layer parameter. Both classes here are data input layers: they need to output both data and a label, so they have two tops and no bottom.
A layer class inherits directly from caffe.Layer and must override setup(), reshape(), forward() and backward(); beyond those, you can define whatever helper methods you like.
setup() does the one-time work the layer needs at startup, such as reading its parameters and initializing the data it will serve.
reshape() fetches the data and shapes it into the four-dimensional blob format; it is called before every forward pass.
forward() is the forward pass of the network; here it just hands the loaded data on, since there is no other computation to do.
backward() is the backward pass; a data layer has nothing to back-propagate, so it simply passes.
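These data layers get their configuration as a Python dict string through the layer's param_str, which is exactly what setup() parses with eval(self.param_str). Assuming the classes above are saved in a file named voc_layers.py, the net can reference them roughly as in FCN's net.py; this is only a sketch, and the dataset path below is a placeholder:

from caffe import layers as L

# hypothetical path -- point sbdd_dir at your local SBDD `dataset` dir
pydata_params = dict(sbdd_dir='/path/to/SBDD/dataset', split='train',
                     mean=(104.00698793, 116.66876762, 122.67891434), seed=1337)
# ntop=2 because the layer produces two tops: data and label
data, label = L.Python(module='voc_layers', layer='SBDDSegDataLayer',
                       ntop=2, param_str=str(pydata_params))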

PS

Here I collect some related material for reference.
1. The official Caffe site now has some pycaffe material, but since Caffe is updated frequently the page may disappear at any time, so I copy it here.
File: pyloss.py

import caffe
import numpy as np


class EuclideanLossLayer(caffe.Layer):
    """
    Compute the Euclidean Loss in the same manner as the C++ EuclideanLossLayer
    to demonstrate the class interface for developing layers in Python.
    """

    def setup(self, bottom, top):
        # check input pair
        if len(bottom) != 2:
            raise Exception("Need two inputs to compute distance.")

    def reshape(self, bottom, top):
        # check input dimensions match
        if bottom[0].count != bottom[1].count:
            raise Exception("Inputs must have the same dimension.")
        # difference is shape of inputs
        self.diff = np.zeros_like(bottom[0].data, dtype=np.float32)
        # loss output is scalar
        top[0].reshape(1)

    def forward(self, bottom, top):
        self.diff[...] = bottom[0].data - bottom[1].data
        top[0].data[...] = np.sum(self.diff**2) / bottom[0].num / 2.

    def backward(self, top, propagate_down, bottom):
        for i in range(2):
            if not propagate_down[i]:
                continue
            if i == 0:
                sign = 1
            else:
                sign = -1
            bottom[i].diff[...] = sign * self.diff / bottom[i].num
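In equations, forward() above computes the Euclidean loss and backward() writes the matching gradient into each bottom's diff; this is just a restatement of the code, with N = bottom[i].num (the batch size):

E = \frac{1}{2N} \sum_{n} \| x_n - y_n \|_2^2, \qquad
\frac{\partial E}{\partial x_n} = \frac{x_n - y_n}{N}, \qquad
\frac{\partial E}{\partial y_n} = -\frac{x_n - y_n}{N}

The sign flip for the second bottom is what the sign variable in backward() implements.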

The following shows how to use this layer:
linreg.prototxt

name: 'LinearRegressionExample'
# define a simple network for linear regression on dummy data
# that computes the loss by a PythonLayer.
layer {
  type: 'DummyData'
  name: 'x'
  top: 'x'
  dummy_data_param {
    shape: { dim: 10 dim: 3 dim: 2 }
    data_filler: { type: 'gaussian' }
  }
}
layer {
  type: 'DummyData'
  name: 'y'
  top: 'y'
  dummy_data_param {
    shape: { dim: 10 dim: 3 dim: 2 }
    data_filler: { type: 'gaussian' }
  }
}
# include InnerProduct layers for parameters
# so the net will need backward
layer {
  type: 'InnerProduct'
  name: 'ipx'
  top: 'ipx'
  bottom: 'x'
  inner_product_param {
    num_output: 10
    weight_filler { type: 'xavier' }
  }
}
layer {
  type: 'InnerProduct'
  name: 'ipy'
  top: 'ipy'
  bottom: 'y'
  inner_product_param {
    num_output: 10
    weight_filler { type: 'xavier' }
  }
}
layer {
  type: 'Python'
  name: 'loss'
  top: 'loss'
  bottom: 'ipx'
  bottom: 'ipy'
  python_param {
    # the module name -- usually the filename -- that needs to be in $PYTHONPATH
    module: 'pyloss'
    # the layer name -- the class name in the module
    layer: 'EuclideanLossLayer'
  }
  # set loss weight so Caffe knows this is a loss layer.
  # since PythonLayer inherits directly from Layer, this isn't automatically
  # known to Caffe
  loss_weight: 1
}
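A quick way to sanity-check this setup (my own sketch, not part of the original material) is to load the net in pycaffe and run one forward/backward pass; pyloss.py must be on PYTHONPATH:

import caffe

caffe.set_mode_cpu()
net = caffe.Net('linreg.prototxt', caffe.TRAIN)

net.forward()                       # calls EuclideanLossLayer.forward()
print(net.blobs['loss'].data)       # scalar loss value
net.backward()                      # calls EuclideanLossLayer.backward()
print(net.blobs['ipx'].diff.shape)  # gradient w.r.t. the 'ipx' blob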

pascal_multilabel_datalayers.py

# imports
import json
import time
import pickle
import scipy.misc
import scipy.sparse  # needed for csr_matrix in load_pascal_annotation
import skimage.io
import caffe

import numpy as np
import os.path as osp

from xml.dom import minidom
from random import shuffle
from threading import Thread
from PIL import Image

from tools import SimpleTransformer


class PascalMultilabelDataLayerSync(caffe.Layer):

    """
    This is a simple syncronous datalayer for training a multilabel model on
    PASCAL.
    """

    def setup(self, bottom, top):

        self.top_names = ['data', 'label']

        # === Read input parameters ===

        # params is a python dictionary with layer parameters.
        params = eval(self.param_str)

        # Check the parameters for validity.
        check_params(params)

        # store input as class variables
        self.batch_size = params['batch_size']

        # Create a batch loader to load the images.
        self.batch_loader = BatchLoader(params, None)

        # === reshape tops ===
        # since we use a fixed input image size, we can shape the data layer
        # once. Else, we'd have to do it in the reshape call.
        top[0].reshape(
            self.batch_size, 3, params['im_shape'][0], params['im_shape'][1])
        # Note the 20 channels (because PASCAL has 20 classes.)
        top[1].reshape(self.batch_size, 20)

        print_info("PascalMultilabelDataLayerSync", params)

    def forward(self, bottom, top):
        """
        Load data.
        """
        for itt in range(self.batch_size):
            # Use the batch loader to load the next image.
            im, multilabel = self.batch_loader.load_next_image()

            # Add directly to the caffe data layer
            top[0].data[itt, ...] = im
            top[1].data[itt, ...] = multilabel

    def reshape(self, bottom, top):
        """
        There is no need to reshape the data, since the input is of fixed size
        (rows and columns)
        """
        pass

    def backward(self, top, propagate_down, bottom):
        """
        These layers does not back propagate
        """
        pass


class BatchLoader(object):

    """
    This class abstracts away the loading of images.
    Images can either be loaded singly, or in a batch. The latter is used for
    the asyncronous data layer to preload batches while other processing is
    performed.
    """

    def __init__(self, params, result):
        self.result = result
        self.batch_size = params['batch_size']
        self.pascal_root = params['pascal_root']
        self.im_shape = params['im_shape']
        # get list of image indexes.
        list_file = params['split'] + '.txt'
        self.indexlist = [line.rstrip('\n') for line in open(
            osp.join(self.pascal_root, 'ImageSets/Main', list_file))]
        self._cur = 0  # current image
        # this class does some simple data-manipulations
        self.transformer = SimpleTransformer()

        print "BatchLoader initialized with {} images".format(
            len(self.indexlist))

    def load_next_image(self):
        """
        Load the next image in a batch.
        """
        # Did we finish an epoch?
        if self._cur == len(self.indexlist):
            self._cur = 0
            shuffle(self.indexlist)

        # Load an image
        index = self.indexlist[self._cur]  # Get the image index
        image_file_name = index + '.jpg'
        im = np.asarray(Image.open(
            osp.join(self.pascal_root, 'JPEGImages', image_file_name)))
        im = scipy.misc.imresize(im, self.im_shape)  # resize

        # do a simple horizontal flip as data augmentation
        flip = np.random.choice(2)*2-1
        im = im[:, ::flip, :]

        # Load and prepare ground truth
        multilabel = np.zeros(20).astype(np.float32)
        anns = load_pascal_annotation(index, self.pascal_root)
        for label in anns['gt_classes']:
            # in the multilabel problem we don't care how MANY instances
            # there are of each class. Only if they are present.
            # The "-1" is b/c we are not interested in the background
            # class.
            multilabel[label - 1] = 1

        self._cur += 1
        return self.transformer.preprocess(im), multilabel


def load_pascal_annotation(index, pascal_root):
    """
    This code is borrowed from Ross Girshick's FAST-RCNN code
    (https://github.com/rbgirshick/fast-rcnn).
    It parses the PASCAL .xml metadata files.
    See publication for further details: (http:///abs/1504.08083).

    Thanks Ross!
    """
    classes = ('__background__',  # always index 0
               'aeroplane', 'bicycle', 'bird', 'boat',
               'bottle', 'bus', 'car', 'cat', 'chair',
               'cow', 'diningtable', 'dog', 'horse',
               'motorbike', 'person', 'pottedplant',
               'sheep', 'sofa', 'train', 'tvmonitor')
    class_to_ind = dict(zip(classes, xrange(21)))

    filename = osp.join(pascal_root, 'Annotations', index + '.xml')
    # print 'Loading: {}'.format(filename)

    def get_data_from_tag(node, tag):
        return node.getElementsByTagName(tag)[0].childNodes[0].data

    with open(filename) as f:
        data = minidom.parseString(f.read())

    objs = data.getElementsByTagName('object')
    num_objs = len(objs)

    boxes = np.zeros((num_objs, 4), dtype=np.uint16)
    gt_classes = np.zeros((num_objs), dtype=np.int32)
    overlaps = np.zeros((num_objs, 21), dtype=np.float32)

    # Load object bounding boxes into a data frame.
    for ix, obj in enumerate(objs):
        # Make pixel indexes 0-based
        x1 = float(get_data_from_tag(obj, 'xmin')) - 1
        y1 = float(get_data_from_tag(obj, 'ymin')) - 1
        x2 = float(get_data_from_tag(obj, 'xmax')) - 1
        y2 = float(get_data_from_tag(obj, 'ymax')) - 1
        cls = class_to_ind[
            str(get_data_from_tag(obj, "name")).lower().strip()]
        boxes[ix, :] = [x1, y1, x2, y2]
        gt_classes[ix] = cls
        overlaps[ix, cls] = 1.0

    overlaps = scipy.sparse.csr_matrix(overlaps)

    return {'boxes': boxes,
            'gt_classes': gt_classes,
            'gt_overlaps': overlaps,
            'flipped': False,
            'index': index}


def check_params(params):
    """
    A utility function to check the parameters for the data layers.
    """
    assert 'split' in params.keys(
    ), 'Params must include split (train, val, or test).'

    required = ['batch_size', 'pascal_root', 'im_shape']
    for r in required:
        assert r in params.keys(), 'Params must include {}'.format(r)


def print_info(name, params):
    """
    Ouput some info regarding the class
    """
    print "{} initialized for split: {}, with bs: {}, im_shape: {}.".format(
        name,
        params['split'],
        params['batch_size'],
        params['im_shape'])
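To use this layer in a net, pass its parameters as a dict string through param_str, matching what check_params() requires (batch_size, pascal_root, im_shape, split). A net-spec sketch of my own, with placeholder values:

from caffe import layers as L

# hypothetical values -- adjust pascal_root, im_shape and batch_size to your setup
data_params = dict(batch_size=128, im_shape=[227, 227], split='train',
                   pascal_root='/path/to/VOCdevkit/VOC2012')
data, label = L.Python(module='pascal_multilabel_datalayers',
                       layer='PascalMultilabelDataLayerSync',
                       ntop=2, param_str=str(data_params))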

caffenet.py

from __future__ import print_function
from caffe import layers as L, params as P, to_proto
from caffe.proto import caffe_pb2

# helper function for common structures

def conv_relu(bottom, ks, nout, stride=1, pad=0, group=1):
    conv = L.Convolution(bottom, kernel_size=ks, stride=stride,
                         num_output=nout, pad=pad, group=group)
    return conv, L.ReLU(conv, in_place=True)

def fc_relu(bottom, nout):
    fc = L.InnerProduct(bottom, num_output=nout)
    return fc, L.ReLU(fc, in_place=True)

def max_pool(bottom, ks, stride=1):
    return L.Pooling(bottom, pool=P.Pooling.MAX, kernel_size=ks, stride=stride)

def caffenet(lmdb, batch_size=256, include_acc=False):
    data, label = L.Data(source=lmdb, backend=P.Data.LMDB, batch_size=batch_size, ntop=2,
        transform_param=dict(crop_size=227, mean_value=[104, 117, 123], mirror=True))

    # the net itself
    conv1, relu1 = conv_relu(data, 11, 96, stride=4)
    pool1 = max_pool(relu1, 3, stride=2)
    norm1 = L.LRN(pool1, local_size=5, alpha=1e-4, beta=0.75)
    conv2, relu2 = conv_relu(norm1, 5, 256, pad=2, group=2)
    pool2 = max_pool(relu2, 3, stride=2)
    norm2 = L.LRN(pool2, local_size=5, alpha=1e-4, beta=0.75)
    conv3, relu3 = conv_relu(norm2, 3, 384, pad=1)
    conv4, relu4 = conv_relu(relu3, 3, 384, pad=1, group=2)
    conv5, relu5 = conv_relu(relu4, 3, 256, pad=1, group=2)
    pool5 = max_pool(relu5, 3, stride=2)
    fc6, relu6 = fc_relu(pool5, 4096)
    drop6 = L.Dropout(relu6, in_place=True)
    fc7, relu7 = fc_relu(drop6, 4096)
    drop7 = L.Dropout(relu7, in_place=True)
    fc8 = L.InnerProduct(drop7, num_output=1000)
    loss = L.SoftmaxWithLoss(fc8, label)

    if include_acc:
        acc = L.Accuracy(fc8, label)
        return to_proto(loss, acc)
    else:
        return to_proto(loss)

def make_net():
    with open('train.prototxt', 'w') as f:
        print(caffenet('/path/to/caffe-train-lmdb'), file=f)

    with open('test.prototxt', 'w') as f:
        print(caffenet('/path/to/caffe-val-lmdb', batch_size=50, include_acc=True), file=f)

if __name__ == '__main__':
    make_net()

tools.py
import numpy as np


class SimpleTransformer:

    """
    SimpleTransformer is a simple class for preprocessing and deprocessing
    images for caffe.
    """

    def __init__(self, mean=[128, 128, 128]):
        self.mean = np.array(mean, dtype=np.float32)
        self.scale = 1.0

    def set_mean(self, mean):
        """
        Set the mean to subtract for centering the data.
        """
        self.mean = mean

    def set_scale(self, scale):
        """
        Set the data scaling.
        """
        self.scale = scale

    def preprocess(self, im):
        """
        preprocess() emulate the pre-processing occuring in the vgg16 caffe
        prototxt.
        """

        im = np.float32(im)
        im = im[:, :, ::-1]  # change to BGR
        im -= self.mean
        im *= self.scale
        im = im.transpose((2, 0, 1))

        return im

    def deprocess(self, im):
        """
        inverse of preprocess()
        """
        im = im.transpose(1, 2, 0)
        im /= self.scale
        im += self.mean
        im = im[:, :, ::-1]  # change to RGB

        return np.uint8(im)


class CaffeSolver:

    """
    Caffesolver is a class for creating a solver.prototxt file. It sets default
    values and can export a solver parameter file.
    Note that all parameters are stored as strings. Strings variables are
    stored as strings in strings.
    """

    def __init__(self, testnet_prototxt_path="testnet.prototxt",
                 trainnet_prototxt_path="trainnet.prototxt", debug=False):

        self.sp = {}

        # critical:
        self.sp['base_lr'] = '0.001'
        self.sp['momentum'] = '0.9'

        # speed:
        self.sp['test_iter'] = '100'
        self.sp['test_interval'] = '250'

        # looks:
        self.sp['display'] = '25'
        self.sp['snapshot'] = '2500'
        self.sp['snapshot_prefix'] = '"snapshot"'  # string within a string!

        # learning rate policy
        self.sp['lr_policy'] = '"fixed"'

        # important, but rare:
        self.sp['gamma'] = '0.1'
        self.sp['weight_decay'] = '0.0005'
        self.sp['train_net'] = '"' + trainnet_prototxt_path + '"'
        self.sp['test_net'] = '"' + testnet_prototxt_path + '"'

        # pretty much never change these.
        self.sp['max_iter'] = '100000'
        self.sp['test_initialization'] = 'false'
        self.sp['average_loss'] = '25'  # this has to do with the display.
        self.sp['iter_size'] = '1'  # this is for accumulating gradients

        if (debug):
            self.sp['max_iter'] = '12'
            self.sp['test_iter'] = '1'
            self.sp['test_interval'] = '4'
            self.sp['display'] = '1'

    def add_from_file(self, filepath):
        """
        Reads a caffe solver prototxt file and updates the Caffesolver
        instance parameters.
        """
        with open(filepath, 'r') as f:
            for line in f:
                if line[0] == '#':
                    continue
                splitLine = line.split(':')
                self.sp[splitLine[0].strip()] = splitLine[1].strip()

    def write(self, filepath):
        """
        Export solver parameters to INPUT "filepath". Sorted alphabetically.
        """
        f = open(filepath, 'w')
        for key, value in sorted(self.sp.items()):
            if not(type(value) is str):
                raise TypeError('All solver parameters must be strings')
            f.write('%s: %s\n' % (key, value))
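
A small usage sketch of my own for CaffeSolver, pairing it with the train.prototxt/test.prototxt generated by caffenet.py above (file names are placeholders):

import caffe
from tools import CaffeSolver

# assumes train.prototxt / test.prototxt were produced by caffenet.py above
solverprototxt = CaffeSolver(trainnet_prototxt_path='train.prototxt',
                             testnet_prototxt_path='test.prototxt')
solverprototxt.sp['base_lr'] = '0.01'  # note: all values are stored as strings
solverprototxt.write('solver.prototxt')

caffe.set_mode_cpu()
solver = caffe.SGDSolver('solver.prototxt')
solver.step(1)  # run one training iteration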
