Model saving and reading

1. TensorFlow: save and read models with tf.train.Saver() and save()
# The save() method of tf.train.Saver:
# save(sess, save_path, global_step=None, latest_filename=None,
#      meta_graph_suffix='meta', write_meta_graph=True, write_state=True)
'''
sess:        a session whose computation graph is loaded and whose variables
             have all been initialized
save_path:   path and file name under which the model is saved
global_step: if given, this number is appended to save_path so that different
             training stages can be distinguished
'''

# Example
import tensorflow as tf
import numpy as np
import os

# Generate data with numpy
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]  # add a new axis: shape (300, 1)
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise

# Input layer
x_ph = tf.placeholder(tf.float32, [None, 1])
y_ph = tf.placeholder(tf.float32, [None, 1])

# Hidden layer
w1 = tf.Variable(tf.random_normal([1, 10]))
b1 = tf.Variable(tf.zeros([1, 10]) + 0.1)
wx_plus_b1 = tf.matmul(x_ph, w1) + b1
hidden = tf.nn.relu(wx_plus_b1)

# Output layer
w2 = tf.Variable(tf.random_normal([10, 1]))
b2 = tf.Variable(tf.zeros([1, 1]) + 0.1)
wx_plus_b2 = tf.matmul(hidden, w2) + b2
y = wx_plus_b2

# Loss and training op
loss = tf.reduce_mean(tf.reduce_sum(tf.square(y_ph - y), reduction_indices=[1]))
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

# Saver object used to save the model
saver = tf.train.Saver()

# Create the save directory if it does not exist
if not os.path.exists('tmp/'):
    os.mkdir('tmp/')

with tf.Session() as sess:
    if os.path.exists('tmp/checkpoint'):
        # A checkpoint exists: restore the variables from the saved model
        saver.restore(sess, 'tmp/model.ckpt')
    else:
        # No checkpoint: initialize the variables
        init = tf.global_variables_initializer()
        sess.run(init)
    for i in range(1000):
        _, loss_value = sess.run([train_op, loss],
                                 feed_dict={x_ph: x_data, y_ph: y_data})
        if i % 50 == 0:
            save_path = saver.save(sess, 'tmp/model.ckpt')
            print("Iteration: %d, training loss: %s" % (i, loss_value))
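The global_step argument from the signature above is not used in the example; a hedged variant of the save call (assuming the same loop variable i) would tag each checkpoint with the iteration number, producing files such as tmp/model.ckpt-50, tmp/model.ckpt-100, and so on:

# Sketch: numbered checkpoints inside the training loop shown above
if i % 50 == 0:
    save_path = saver.save(sess, 'tmp/model.ckpt', global_step=i)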
 

Each save operation creates three data files plus a checkpoint file. Roughly speaking, the parameters (weights and so on) are stored, in dictionary form, in the .ckpt.data file, while the graph and its metadata are stored in the .ckpt.meta file, which can be loaded into the current default graph with tf.train.import_meta_graph.
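A minimal sketch of that loading path, assuming the tmp/model.ckpt files produced by the example above:

import tensorflow as tf

with tf.Session() as sess:
    # Load the graph definition from the .meta file into the default graph
    saver = tf.train.import_meta_graph('tmp/model.ckpt.meta')
    # Restore the variable values saved in the data files
    saver.restore(sess, 'tmp/model.ckpt')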

2. Keras: save and read models

* model.save(filepath) saves the model architecture and the weights at the same time.

import numpy as np
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD

# Load data
(x_train, y_train), (x_test, y_test) = mnist.load_data()
print('x_shape:', x_train.shape)  # (60000, 28, 28)
print('y_shape:', y_train.shape)  # (60000,)

# Reshape (60000, 28, 28) -> (60000, 784) and normalize
x_train = x_train.reshape(x_train.shape[0], -1) / 255.0
x_test = x_test.reshape(x_test.shape[0], -1) / 255.0

# Change labels to one-hot format
y_train = np_utils.to_categorical(y_train, num_classes=10)
y_test = np_utils.to_categorical(y_test, num_classes=10)

# Create the model: 784 input neurons, 10 output neurons
model = Sequential([
    Dense(units=10, input_dim=784, bias_initializer='one', activation='softmax')
])

# Define the optimizer
sgd = SGD(lr=0.2)

# Set optimizer, loss function, and the metrics computed during training
model.compile(
    optimizer=sgd,
    loss='mse',
    metrics=['accuracy'],
)

# Train the model
model.fit(x_train, y_train, batch_size=64, epochs=5)

# Evaluate the model
loss, accuracy = model.evaluate(x_test, y_test)
print('\ntest loss', loss)
print('accuracy', accuracy)

# Save the model
model.save('model.h5')

# Retraining: load the model saved above and continue training
import numpy as np
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD
from keras.models import load_model

# Load data
(x_train, y_train), (x_test, y_test) = mnist.load_data()
print('x_shape:', x_train.shape)  # (60000, 28, 28)
print('y_shape:', y_train.shape)  # (60000,)

# Reshape (60000, 28, 28) -> (60000, 784) and normalize
x_train = x_train.reshape(x_train.shape[0], -1) / 255.0
x_test = x_test.reshape(x_test.shape[0], -1) / 255.0

# Change labels to one-hot format
y_train = np_utils.to_categorical(y_train, num_classes=10)
y_test = np_utils.to_categorical(y_test, num_classes=10)

# Load the saved model
model = load_model('model.h5')

# Evaluate the loaded model
loss, accuracy = model.evaluate(x_test, y_test)
print('\ntest loss', loss)
print('accuracy', accuracy)

# Continue training
model.fit(x_train, y_train, batch_size=64, epochs=2)

# Evaluate again
loss, accuracy = model.evaluate(x_test, y_test)
print('\ntest loss', loss)
print('accuracy', accuracy)

# Save parameters / load parameters
model.save_weights('my_model_weights.h5')
model.load_weights('my_model_weights.h5')

# Save network structure / load network structure
from keras.models import model_from_json
json_string = model.to_json()
model = model_from_json(json_string)
print(json_string)
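Note that model_from_json rebuilds only the architecture, not the weights. A minimal sketch that combines the two artifacts saved above (json_string and my_model_weights.h5) into a usable model again; the optimizer and loss passed to compile are placeholders:

from keras.models import model_from_json

# Rebuild the architecture from the JSON description ...
restored = model_from_json(json_string)
# ... then load the previously saved weights into it
restored.load_weights('my_model_weights.h5')
# compile() must be called again before evaluate() or fit() can be used
restored.compile(optimizer='sgd', loss='mse', metrics=['accuracy'])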
* Setting checkpoints by creating a ModelCheckpoint callback.

from __future__ import print_function
import numpy as np
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.optimizers import SGD
from keras.utils import np_utils
from keras.callbacks import ModelCheckpoint

# Random seed for reproducibility
np.random.seed(1671)

# Network structure and training parameters
NB_EPOCH = 40
BATCH_SIZE = 128
VERBOSE = 1
NB_CLASSES = 10
OPTIMIZER = SGD()
N_HIDDEN = 128
VALIDATION_SPLIT = 0.2
RESHAPED = 784

# Load data
def load_data(path="mnist.npz"):
    f = np.load(path)
    x_train, y_train = f['x_train'], f['y_train']
    x_test, y_test = f['x_test'], f['y_test']
    f.close()
    return (x_train, y_train), (x_test, y_test)

(x_train, y_train), (x_test, y_test) = load_data()

# Data preprocessing: reshape, convert type, and normalize
x_train = x_train.reshape(60000, 784).astype('float32') / 255
x_test = x_test.reshape(10000, 784).astype('float32') / 255

# Print information
print('Training samples:', x_train.shape)
print('Testing samples:', x_test.shape)

# Convert categories to one-hot code
y_train = np_utils.to_categorical(y_train, NB_CLASSES)
y_test = np_utils.to_categorical(y_test, NB_CLASSES)

# Define the network structure
model = Sequential()
model.add(Dense(N_HIDDEN, input_shape=(RESHAPED,)))
model.add(Activation('relu'))
model.add(Dense(N_HIDDEN))
model.add(Activation('relu'))
model.add(Dense(NB_CLASSES))
model.add(Activation('softmax'))

# Compile the model
model.compile(loss='categorical_crossentropy', optimizer=OPTIMIZER,
              metrics=['accuracy'])

# Set the checkpoint
filepath = 'saved_models/weights-improvement-{epoch:02d}-{val_acc:.5f}.hdf5'
checkpoint = ModelCheckpoint(filepath=filepath, monitor='val_acc', verbose=VERBOSE,
                             save_best_only=True, mode='max')

# Train the model
history = model.fit(x_train, y_train,
                    batch_size=BATCH_SIZE, epochs=NB_EPOCH, verbose=VERBOSE,
                    validation_split=VALIDATION_SPLIT, callbacks=[checkpoint])

# Evaluate the model
score = model.evaluate(x_test, y_test, verbose=VERBOSE)
print('Test score:', score[0])
print('Test accuracy:', score[1])

This way the weights corresponding to the best evaluation result on the validation set are saved; the last file written is the best model so far.
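Loading the best checkpoint back is then just a matter of pointing load_weights at the newest file; a hedged sketch (the exact filename depends on the epoch and val_acc reached in your run, and the sort relies on the zero-padded epoch number in the name):

import glob

# Because save_best_only=True, each new file beats the previous one,
# so the checkpoint with the highest epoch number is the best so far
best_path = sorted(glob.glob('saved_models/weights-improvement-*.hdf5'))[-1]
model.load_weights(best_path)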
Data saving and reading

Common ways to store data: JSON files, CSV files, a MySQL database, a Redis database, or a MongoDB database.

Using Python's print function to write data to a txt/csv file:
f = open('out.txt', 'w')
for i in range(0, 5):
    print(i, file=f)
f.close()

# f = open('results/csv/out.csv', 'w')
# for i in range(0, 5):
#     print(i, file=f)
# f.close()
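The JSON and CSV variants can also be written with the standard library; a minimal sketch (the file names out.json and out.csv and the sample records are just illustrative):

import csv
import json

data = [{'i': i, 'square': i * i} for i in range(5)]

# Write the records as JSON
with open('out.json', 'w') as f:
    json.dump(data, f)

# Write the same records as CSV with a header row
with open('out.csv', 'w', newline='') as f:
    writer = csv.DictWriter(f, fieldnames=['i', 'square'])
    writer.writeheader()
    writer.writerows(data)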
