阿里云平台cifar10代码解析

  • 时间:
  • 浏览:1
  • 来源:uu快3下载网址_uu快3IOS下载_电脑版

    network = conv_2d(network, 64, 3, activation='relu')

    predict_pic = os.path.join(FLAGS.buckets, "bird_bullocks_oriole.jpg")

    print (prediction[0])

#如果 Python 版本大于 3.0(sys.version_info 返回当前解释器的版本信息)

            Y_train = labels

    # predict_pic = os.path.join(FLAGS.buckets, "bird_mount_bluebird.jpg")

            Y_train = np.concatenate([Y_train, labels], axis=0)

                         X_train[:, 2048:])) / 255.

            sys.stderr.write("\n")

    parser.add_argument('--buckets', type=str, default='', help='input data path')

if __name__ == '__main__':

        percent = readsofar * 1e2 / totalsize

    # print (prediction[0])

        Y_test = to_categorical(Y_test, 10)

    return (X_train, Y_train), (X_test, Y_test)

    img_prep = ImagePreprocessing()

    # img = tf.image.decode_jpeg(raw_data, 3)

print(model_path)

#reporthook from stackoverflow #13881092

    model = tflearn.DNN(network, tensorboard_verbose=0)

            percent, len(str(totalsize)), readsofar, totalsize)

    X_train = np.reshape(X_train, [-1, 32, 32, 3])

    parser = argparse.ArgumentParser()

    img_aug.add_random_rotation(max_angle=25.)#按随机层厚旋转,最大旋转层厚25

                         learning_rate=0.001)

    prediction = model.predict([img])

                         loss='categorical_crossentropy',

    # prediction = model.predict([img])

    # img = scipy.ndimage.imread(predict_pic, mode="RGB")

import tensorflow as tf

        d = pickle.loads(object, encoding='latin1') 

        data, labels = load_batch(fpath)

    if totalsize > 0:

  #checkpointDir='oss://.../.../.../check_point/model/')

    # input_file_queue = tf.train.string_input_producer(file_paths)

    print (prediction[0])

dirname = os.path.join(FLAGS.buckets, "")

import numpy as np

    img_aug.add_random_flip_leftright()#随机左右翻转

#print('dirname:',dirname)

    #origin_bytes = bytes(object, encoding='latin1')

    network = input_data(shape=[None, 32, 32, 3],

    X_train = []

#(X_train[:, :1024], X_train[:, 1024:2048],X_train[:, 2048:])tup

from __future__ import division, print_function, absolute_import __future__ 

        # Python2

import scipy

    X_test = np.reshape(X_test, [-1, 32, 32, 3])

    img_obj = file_io.read_file_to_string(predict_pic)

#沿着某个轴拼接矩阵

        if i == 1:

    file_io.write_string_to_file("bird_bullocks_oriole.jpg", img_obj)

#如果模块是被直接运行的,则代码块被运行;如果模块是被导入的,则代码块不被运行

    # img = tf.image.resize_images(img, [32, 32])

    # Predict

    # img = scipy.misc.imresize(img, (32, 32), interp="bicubic").astype(np.float32, casting='unsafe')

    labels = d["labels"]

def load_batch(fpath):

Tflearn  https://github.com/tflearn/tflearn

阿里提供的代码并没有附带解释,花了好长时间才把上边大致看了一遍,但还未完全掌握。分享我的见解,也请看到的朋友帮忙修正,共勉!

#将所有片段拼接在一起,返回的是ndarray

        Y_train = to_categorical(Y_train, 10)

#uint8 无符号整数,0 至 255,处理(除以255)后,每个元素都小于等于1

from tflearn.data_utils import shuffle, to_categorical

#tflearn.data_utils.to_categorical(y, nb_classes):y为标签向量,nb_classes为分类数

    model_path = os.path.join(FLAGS.checkpointDir, "model.tfl")

    # model.fit(X, Y, n_epoch=1150, shuffle=True, validation_set=(X_test, Y_test),

#checkpointDir='oss://.../.../.../check_point/model/')

    img_aug = ImageAugmentation()

            X_train = np.concatenate([X_train, data], axis=0)

    print ("This is a %s"%(num[prediction[0].tolist().index(max(prediction[0]))]))

        d = pickle.loads(object)

#print('model_path:',model_path)

    # Train using classifier

    network = max_pool_2d(network, 2)

import argparse

#3通道分离shape为(111500,1024,3),为reshape做准备

    #print (prediction[0])

from tflearn.layers.conv import conv_2d, max_pool_2d

#经过解压得知 'data_batch_' + str(i)得到的是训练文件

#__future__ 是带有 Python 未来特性的模块:如果用的是 Python 2,就可以通过导入它来使用 Python 3 的特性

#反序列化。。。尝试将object = file_io.read_file_to_string(fpath) 

#录入文件路径,返回data,labels

#dirname: oss://.../.../.../.../

    else: # total size is unknown

from tflearn.data_preprocessing import ImagePreprocessing

    # Real-time data augmentation

    #print (prediction[0].index(max(prediction[0])))

if one_hot:

    model.load(model_path)

import sys

    X_test = np.dstack((X_test[:, :1024], X_test[:, 1024:2048],

def load_data(dirname, one_hot=False):

from tflearn.data_augmentation import ImageAugmentation

    X_train = np.dstack((X_train[:, :1024], X_train[:, 1024:2048],

    X_test, Y_test = load_batch(fpath)

        if readsofar >= totalsize: # near the end

    network = conv_2d(network, 64, 3, activation='relu')

for i in range(1, 6):

#将文件内容读取为字符串(或字节),fpath 需是文件路径

    Y_test = to_categorical(Y_test, 10)

    # file_paths = tf.train.match_filenames_once(predict_pic)

    fpath = os.path.join(dirname, 'test_batch')

    return data, labels

if sys.version_info > (3, 0):

    # Scale it to 32x32

    # file_path, raw_data = reader.read(input_file_queue)

import os

        sys.stderr.write("read %d\n" % (readsofar,))

#1,2,3,4,5

    # Convolutional network building

def reporthook(blocknum, blocksize, totalsize):

import tflearn

#改成pickle.dumps()进行序列化  encoding="bytes"

#generic entry point script 通用入口点脚本

    network = regression(network, optimizer='adam',

    readsofar = blocknum * blocksize

    parser.add_argument('--checkpointDir', type=str, default='',help='output model path')

def main(_):

from tflearn.layers.core import input_data, dropout, fully_connected

    # Real-time data preprocessing

#print('FLAGS1:',FLAGS)  当前存储地址

    img = scipy.misc.imresize(img, (32, 32), interp="bicubic").astype(np.float32, casting='unsafe')

Y = to_categorical(Y, 10)

#根据需要,看是否要转化成独热编码

                        X_test[:, 2048:])) / 255.

FLAGS = None

    network = dropout(network, 0.5)

    num=['airplane','automobile','bird','cat','deer','dog','frog','horse','ship','truck']

        # Python3

#np.dstack(tup)等价于np.concatenate(tup,axis=2)即在第三维进行拼接

object = file_io.read_file_to_string(fpath) 

    network = fully_connected(network, 512, activation='relu')

        else:

    print("load data done")

#major=3,minor=6,micro=2,releaselevel=final,serial=0

    #读取图片文件,转化成RGB模式,返回(0,255)的数组

    network = fully_connected(network, 10, activation='softmax')

    else:

#data_augmentation方法与data_preprocessing方法类似,都在训练阶段对input_data的输入进行处理,详见data_augmentation

    Y_train = []

    network = conv_2d(network, 32, 3, activation='relu')

#录入文件并得到data, labels

        fpath = os.path.join(dirname, 'data_batch_' + str(i))

#连接:将dirname和上边的'data_batch_' + str(i)进行拼接,得到文件夹中文件的路径

    img = scipy.ndimage.imread("bird_bullocks_oriole.jpg", mode="RGB")

#run(main=None,argv=None)tf的固定格式

        sys.stderr.write(s)#重定向标准错误信息

            X_train = data

    img_prep.add_featurewise_stdnorm()#标准主次 standard deviation

from six.moves import urllib

import tarfile

  #Namespace(buckets='oss://.../.../.../.../', 

#tflearn.data_utils.shuffle(*arrs):各个矩阵按第一维以一致的顺序打乱

X, Y = shuffle(X, Y)

    # with open(fpath, 'rb') as f:

                         data_preprocessing=img_prep,

    data = d["data"]#data.shape (111500,15072)

    (X, Y), (X_test, Y_test) = load_data(dirname)

import pickle

                         data_augmentation=img_aug)

from tflearn.layers.estimator import regression

tf.app.run(main=main)

#stack(堆叠)

    #           show_metric=True, batch_size=96, run_id='cifar10_cnn')

    # reader = tf.WholeFileReader()

    # prediction = model.predict([img])

        s = "\r%5.1f%% %*d / %d" % (

    network = max_pool_2d(network, 2)

    # predict_pic = os.path.join(FLAGS.buckets, "bird_mount_bluebird.jpg")

    #"bicubic"双三次插值

FLAGS, _ = parser.parse_known_args()

    img_prep.add_featurewise_zero_center()#零中心分布

#Namespace(buckets='oss://.../.../.../.../', 

from tensorflow.python.lib.io import file_io

##model_path: #oss://.../.../.../check_point/model/model.tf2