Mais conteúdo relacionado
Semelhante a AI入門「第3回:数学が苦手でも作って使えるKerasディープラーニング」【旧版】※新版あります (20)
AI入門「第3回:数学が苦手でも作って使えるKerasディープラーニング」【旧版】※新版あります
- 18. 17
4.KerasでのML開発入門
以下を「not.py」というファイルで保存します
(コード内容の解説は、実行後に行います)
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.optimizers import SGD

# not.py — teach a single sigmoid unit the logical NOT function.
np.random.seed(0)  # fixed seed: every run initializes and trains identically

# One dense layer (1 input -> 1 unit) squashed through a sigmoid.
model = Sequential()
model.add(Dense(input_dim=1, units=1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy', optimizer=SGD(lr=0.1))

# Truth table for NOT: input 1 -> 0, input 0 -> 1.
x = np.array([[1], [0]])
y = np.array([[0], [1]])

model.fit(x, y, epochs=200, batch_size=1)
print()

# Compare the hard 0/1 predictions against the expected labels.
predicted = model.predict_classes(x, batch_size=1)
print()
print(y == predicted)

# Raw sigmoid outputs (probabilities) for each input.
proba = model.predict_proba(x, batch_size=1)
print()
print(proba)
- 25. 24
4.KerasでのML開発入門
「not.py」を元に、下記赤枠を変更し、「or.py」で保存します
(「input_dim」を2次元に変更し、xとyを書き換える)
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.optimizers import SGD

# or.py — single-layer perceptron learning logical OR (linearly separable).
np.random.seed(0)  # deterministic initialization

# Two inputs feeding one sigmoid unit.
model = Sequential()
model.add(Dense(input_dim=2, units=1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy', optimizer=SGD(lr=0.1))

# Full truth table for OR.
x = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([[0], [1], [1], [1]])

model.fit(x, y, epochs=200, batch_size=1)
print()

# Hard 0/1 class predictions for each row of the truth table.
predicted = model.predict_classes(x, batch_size=1)
print()
print(predicted)

# Sigmoid probabilities behind those predictions.
proba = model.predict_proba(x, batch_size=1)
print()
print(proba)
- 31. 30
5.2層ニューラルネットワークでXOR
「or.py」を元に、下記赤枠を変更し、「xor.py」で保存します
(modelの構成・記述を変更し、yと試行数を書き換え)
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.optimizers import SGD

# xor.py — XOR is not linearly separable, so a hidden layer is required.
np.random.seed(0)  # deterministic initialization

# 2 inputs -> 2 hidden sigmoid units -> 1 sigmoid output.
model = Sequential([
    Dense(input_dim=2, units=2),
    Activation('sigmoid'),
    Dense(units=1),
    Activation('sigmoid'),
])
model.compile(loss='binary_crossentropy', optimizer=SGD(lr=0.1))

# Full truth table for XOR.
x = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([[0], [1], [1], [0]])

# XOR needs more iterations to converge than OR (800 vs 200 epochs).
model.fit(x, y, epochs=800, batch_size=1)
print()

predicted = model.predict_classes(x, batch_size=1)
print()
print(predicted)

proba = model.predict_proba(x, batch_size=1)
print()
print(proba)
- 37. 36
6.ディープラーニングで時系列分析
最初の時系列データは、カンタンな、ノイズ入りsin波にします
以下でノイズ入りsin波をグラフ表示します
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.layers.recurrent import LSTM
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping
def sin(x, T=100):
    """Return a sine wave of period T evaluated at sample position(s) x."""
    theta = 2.0 * np.pi * x / T
    return np.sin(theta)
def noisy_sin(T=100, ampl=0.05):
    """Return 2*T + 1 samples of a period-T sine wave with uniform noise.

    Args:
        T: period of the sine wave in samples (also sets the output length).
        ampl: amplitude of the uniform noise added to every sample.

    Returns:
        1-D numpy array of length 2*T + 1 (two full periods plus one sample).
    """
    x = np.arange(0, 2 * T + 1)
    noise = ampl * np.random.uniform(low=-1.0, high=1.0, size=len(x))
    # fix: forward T to sin(); previously sin() always used its default
    # period of 100, so noisy_sin(T != 100) sampled the wrong wave.
    return sin(x, T) + noise
# Plot the raw noisy sine wave.
# fix: plt was used below but never imported anywhere in the visible code.
import matplotlib.pyplot as plt  # pip install matplotlib

f = noisy_sin()
plt.figure()
plt.plot(range(0, len(f)), f, color="b", label="row")
plt.legend()
plt.show()
# python predict_sin.py
predict_sin.py
- 39. 38
6.ディープラーニングで時系列分析
(続き) RNN用にデータを分割し、モデルを構築します
…
def make_dataset(datas, n_prev=25):
    """Slice a 1-D series into sliding windows for LSTM training.

    Args:
        datas: 1-D sequence of values (e.g. the noisy sine wave).
        n_prev: window length — number of past samples per training example.
            fix: this parameter was previously accepted but ignored in favor
            of a hard-coded 25; the default now matches the old effective
            behavior, so `make_dataset(f)` is unchanged.

    Returns:
        (re_data, re_target): arrays of shape (N, n_prev, 1) and (N, 1),
        where each target is the sample immediately following its window
        and N = len(datas) - n_prev.
    """
    data = []
    target = []
    maxlen = n_prev
    for i in range(len(datas) - maxlen):
        data.append(datas[i: i + maxlen])
        target.append(datas[i + maxlen])
    # Add the trailing feature axis Keras LSTMs expect: (samples, time, 1).
    re_data = np.array(data).reshape(len(data), maxlen, 1)
    re_target = np.array(target).reshape(len(target), 1)
    return re_data, re_target
# Build sliding-window training data from the noisy sine wave f:
# g has shape (N, 25, 1), h has shape (N, 1) per make_dataset above.
g, h = make_dataset( f )
# NOTE(review): `x` is not defined in the visible code — the elided portion
# (marked "…") presumably assigns it (e.g. x, y = g, h). As written this
# line would raise NameError; confirm against the full predict_sin.py.
length_of_sequence = x.shape[ 1 ]
in_out_neurons = 1  # one value in, one value out per time step
n_hidden = 300  # LSTM hidden units
# Single LSTM layer (returns only the final state) + linear readout
# predicting the next sample of the series.
model = Sequential()
model.add( LSTM( n_hidden, batch_input_shape =
( None, length_of_sequence, in_out_neurons ), return_sequences = False ) )
model.add( Dense( in_out_neurons ) )
model.add( Activation( "linear" ) )
# Regression setup: mean-squared-error loss with Adam (old Keras `lr=` arg).
optimizer = Adam( lr = 0.001 )
model.compile( loss = "mean_squared_error", optimizer = optimizer )
# Stop training once validation loss has not improved for 20 epochs.
early_stopping = EarlyStopping( monitor = 'val_loss', mode = 'min', patience = 20 )
…
predict_sin.py
- 40. 39
6.ディープラーニングで時系列分析
(続き) 学習させ、未来データを予測し、グラフ表示します
…
# Train the LSTM; hold out 10% for validation and stop early on plateau.
# NOTE(review): `x` and `y` are not defined in the visible code — the elided
# portion (marked "…") presumably assigns them (e.g. x, y = g, h); confirm
# against the full predict_sin.py.
model.fit( x, y,
    batch_size = 300,
    epochs = 100,
    validation_split = 0.1,
    callbacks = [ early_stopping ]
)
# Seed a rolling forecast with training window #175; g[175] is (25, 1),
# so future_test is (1, 25) after the transpose.
future_test = g[ 175 ].T
time_length = future_test.shape[ 1 ]
future_result = np.empty( ( 0 ) )
# Predict 400 future samples, feeding each prediction back into the window.
for step2 in range( 400 ):
    test_data = np.reshape( future_test, ( 1, time_length, 1 ) )
    batch_predict = model.predict( test_data )
    # Slide the window: drop the oldest value, append the new prediction.
    future_test = np.delete( future_test, 0 )
    future_test = np.append( future_test, batch_predict )
    future_result = np.append( future_result, batch_predict )
# Fitted values over the training inputs, for plotting against the raw wave.
predicted = model.predict( x )
plt.figure()
plt.plot( range( 0, len( f ) ), f, color = "b", label = "sin" )
# In-sample predictions are offset by the window length (25 samples).
plt.plot( range( 25, len( predicted ) + 25 ), predicted, color = "r", label = "predict" )
# Rolling forecast starts where the original series ends.
plt.plot( range( 0 + len( f ), len( future_result ) + len( f ) ), future_result, color = "g", label = "future" )
plt.legend()
plt.show()
predict_sin.py