%run './../interface/hyperspectral_image.ipynb'
Note that here Y and Yhat are one-hot encoded!
%run './../interface/tensorflow_defaults.ipynb'
%run './../interface/tensorflow_functions.ipynb'
# split once so that XTrain and YTrain are guaranteed to come from the same train/test split
splitStrat = hyimage.splitPixels(stratified=True, trainProportion=0.2)
XTrain = tf.constant(value=splitStrat.XTrain, dtype=tf.float64, name='XTrain')
YTrain = tf.constant(value=hyimage.labelBinEncode(splitStrat.YTrain), dtype=tf.int32, name='YTrain')
YTrain.eval()[0:5]
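labelBinEncode is defined in hyperspectral_image.ipynb and presumably wraps something like scikit-learn's LabelBinarizer. A standalone sketch of the same one-hot idea (the class values below are made up):
from sklearn.preprocessing import LabelBinarizer
# hypothetical illustration only -- not the labelBinEncode used above
LabelBinarizer().fit_transform([2, 0, 1, 2])
# array([[0, 0, 1],
#        [1, 0, 0],
#        [0, 1, 0],
#        [0, 0, 1]])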
X = tf.placeholder_with_default(input=XTrain, shape=None, name='X')
Y = tf.placeholder_with_default(input=YTrain, shape=None, name='Y')
W = tf.Variable(tf.truncated_normal([hyimage.bandAmount, 10], seed=RANDOM_STATE, dtype=tf.float64), dtype=tf.float64, name='W')
b = tf.Variable(tf.truncated_normal([10], seed=RANDOM_STATE, dtype=tf.float64), dtype=tf.float64, name='b')
YhatSoftmax = tf.nn.softmax(tf.matmul(X, W) + b, name='YhatSoftmax')
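For reference, the same forward pass in plain NumPy (a sketch only; XNp, WNp and bNp stand for NumPy copies of the tensors above):
import numpy as np

def softmaxRows(logits):
    # subtract the row maximum before exponentiating to avoid overflow
    shifted = logits - logits.max(axis=1, keepdims=True)
    expd = np.exp(shifted)
    return expd / expd.sum(axis=1, keepdims=True)

# YhatSoftmaxNp = softmaxRows(np.dot(XNp, WNp) + bNp)  # each row sums to 1, one column per class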
Yhat = tf.one_hot(indices=tf.argmax(YhatSoftmax, dimension=1), depth=tf.shape(YTrain).eval()[1], dtype=tf.int32, name='Yhat')
cross_entropy = tf.reduce_mean(-tf.reduce_sum(tf.cast(Y, tf.float64) * tf.log(YhatSoftmax), reduction_indices=[1]))
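Computing log(softmax(...)) explicitly can underflow once a softmax output gets very close to zero. A numerically safer alternative (not what the cell above does; keyword names may differ in very old TensorFlow versions) fuses the two operations:
# sketch only -- feed the raw scores (logits), not the softmax output
logits = tf.matmul(X, W) + b
cross_entropy_stable = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=tf.cast(Y, tf.float64), logits=logits))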
tf.initialize_all_variables().run()
train_step = tf.train.GradientDescentOptimizer(2.5).minimize(cross_entropy)
train_step.run()
cross_entropy.eval()
tfMinimize(_steps=1000000, _train_step=train_step, _loss=cross_entropy)
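tfMinimize is defined in tensorflow_functions.ipynb and not shown here; a minimal stand-in would be a loop of this shape (the real helper may log or stop differently):
def tfMinimizeSketch(_steps, _train_step, _loss, _logEvery=100000):
    # hypothetical stand-in for tfMinimize
    for step in range(_steps):
        _train_step.run()
        if step % _logEvery == 0:
            print(step, _loss.eval())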
cross_entropy.eval()
W.eval()[0:2]
Yhat.eval()[0]
hyimage.labelBinDecode(Yhat.eval())
%run './../interface/model_evaluation_functions.ipynb'
tfAccuracyOneHotEncoded(YTensorOneHot=Y, YHatTensorOneHot=Yhat)
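tfAccuracyOneHotEncoded comes from model_evaluation_functions.ipynb; in essence it has to compare the argmax of the two one-hot tensors, roughly like this (a sketch, not the original helper):
# hypothetical equivalent: fraction of rows where the predicted class equals the true class
correct = tf.equal(tf.argmax(Y, 1), tf.argmax(Yhat, 1))
tf.reduce_mean(tf.cast(correct, tf.float64)).eval()  # no feed -> evaluates on the training data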
feedTest = {X: splitStrat.XTest,
            Y: hyimage.labelBinEncode(splitStrat.YTest)}
# same result as with tfAccuracyOneHotEncoded
# pyAccuracy(feedTest[Y], Yhat.eval(feed_dict=feedTest))
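pyAccuracy is also defined in model_evaluation_functions.ipynb; a plain stand-in for the commented call above could be scikit-learn's accuracy_score on the decoded class indices (a sketch, assuming the one-hot values are NumPy arrays):
from sklearn.metrics import accuracy_score
# hypothetical stand-in: decode one-hot rows back to class indices and compare
accuracy_score(feedTest[Y].argmax(axis=1), Yhat.eval(feed_dict=feedTest).argmax(axis=1))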
compareLabels(originalLabels=splitStrat.YTest,
              predictedLabels=hyimage.labelBinDecode(Yhat.eval(feed_dict=feedTest)))
tfAccuracyOneHotEncoded(YTensorOneHot=Y, YHatTensorOneHot=Yhat, feed=feedTest)
feedAll = {X: hyimage.pixelsScaled, Y: hyimage.labelBinEncode(hyimage.pixelLabels)}
hyImageCompare(hyimage, hyimage.labelBinDecode(Yhat.eval(feed_dict=feedAll)).reshape(hyimage.labels.shape))
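hyImageCompare comes from hyperspectral_image.ipynb; a minimal version of the same comparison, assuming hyimage.labels is a 2-D label map and matplotlib is available, might look like this (sketch only):
import matplotlib.pyplot as plt
# hypothetical stand-in for hyImageCompare: ground truth next to the prediction
predictedMap = hyimage.labelBinDecode(Yhat.eval(feed_dict=feedAll)).reshape(hyimage.labels.shape)
fig, (axLeft, axRight) = plt.subplots(1, 2, figsize=(10, 5))
axLeft.imshow(hyimage.labels)
axLeft.set_title('ground truth')
axRight.imshow(predictedMap)
axRight.set_title('softmax regression')
plt.show()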
from sklearn.linear_model import LogisticRegression
# regularization parameter C=10e2 seems to be the best for the test data.
lReg = LogisticRegression(solver='newton-cg', max_iter=1000, C=10e2, n_jobs=-1)
# split once with the default settings and reuse the same split for fitting and scoring
splitDefault = hyimage.splitPixels()
lReg.fit(X=splitDefault.XTrain, y=splitDefault.YTrain.ravel())
pyAccuracy(Y=splitDefault.YTrain, Yhat=lReg.predict(X=splitDefault.XTrain))
pyAccuracy(Y=splitDefault.YTest, Yhat=lReg.predict(X=splitDefault.XTest))
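The comment above says C=10e2 was chosen against the test data; a cleaner check (a sketch, assuming scikit-learn >= 0.18) would cross-validate C on the training split only:
from sklearn.model_selection import GridSearchCV
# hypothetical check of the hand-picked C, using only the training split
grid = GridSearchCV(estimator=LogisticRegression(solver='newton-cg', max_iter=1000),
                    param_grid={'C': [1e1, 1e2, 1e3, 1e4]}, cv=3)
grid.fit(splitDefault.XTrain, splitDefault.YTrain.ravel())
grid.best_params_, grid.best_score_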
hyImageCompare(hyimage, lReg.predict(X=hyimage.pixelsScaled).reshape(hyimage.labels.shape))