博客
关于我
强烈建议你试试无所不能的chatGPT,快点击我
字符识别(10)——源数据20*20,训练样本0~A——基于TensorFlow+CNN实现
阅读量:4298 次
发布时间:2019-05-27

本文共 3823 字,大约阅读时间需要 12 分钟。

# coding=utf-8
"""Character-recognition demo: a small CNN over 20x20 RGB images.

Labels are encoded in file names ("1_40.jpg" -> label 1); classes are the
digits 0-9 plus the letter "A". Set ``train`` to choose between training on
``data/`` and evaluating on ``test/``. TensorFlow 1.x graph-mode API.
"""
import os
# Image loading
from PIL import Image
# Matrix / array operations
import numpy as np
import tensorflow as tf

# Mode switch: True trains the model, False evaluates a saved checkpoint.
train = False  # True False

# Data folder: training images vs. test images.
if train:
    data_dir = "data"
else:
    data_dir = "test"

# Checkpoint path used by tf.train.Saver for save/restore.
model_path = "model/image_model"


def read_data(data_dir):
    """Read every image in *data_dir* into numpy arrays.

    The label is taken from the file name prefix, e.g. "1_40.jpg" -> 1.

    Args:
        data_dir: directory containing the labelled image files.

    Returns:
        (fpaths, datas, labels): list of file paths, float array of images
        scaled to [0, 1], and int array of labels.
    """
    datas = []
    labels = []
    fpaths = []
    for fname in os.listdir(data_dir):
        fpath = os.path.join(data_dir, fname)
        fpaths.append(fpath)
        image = Image.open(fpath)
        print(fpath)
        # Normalize 8-bit pixel values to [0, 1].
        data = np.array(image) / 255.0
        label = int(fname.split("_")[0])
        datas.append(data)
        labels.append(label)
    datas = np.array(datas)
    labels = np.array(labels)
    print("shape of datas: {}\tshape of labels: {}".format(datas.shape, labels.shape))
    return fpaths, datas, labels


fpaths, datas, labels = read_data(data_dir)

# Number of distinct classes present in the loaded labels.
num_classes = len(set(labels))

# Placeholders for the input batch (20x20 RGB images) and integer labels.
datas_placeholder = tf.placeholder(tf.float32, [None, 20, 20, 3])
labels_placeholder = tf.placeholder(tf.int32, [None])

# Dropout rate: fed as 0.25 during training, 0 during testing.
# FIX: renamed from the misspelled "dropout_placeholdr".
dropout_placeholder = tf.placeholder(tf.float32)

# Conv layer: 20 filters of size 5, ReLU activation.
conv0 = tf.layers.conv2d(datas_placeholder, 20, 5, activation=tf.nn.relu)
# Max pooling: 2x2 window, 2x2 stride.
pool0 = tf.layers.max_pooling2d(conv0, [2, 2], [2, 2])

# Conv layer: 40 filters of size 4, ReLU activation.
conv1 = tf.layers.conv2d(pool0, 40, 4, activation=tf.nn.relu)
# Max pooling: 2x2 window, 2x2 stride.
pool1 = tf.layers.max_pooling2d(conv1, [2, 2], [2, 2])

# Flatten the 3-D feature maps into a 1-D vector per example.
flatten = tf.layers.flatten(pool1)

# Fully connected layer producing a 400-dim feature vector.
# FIX: the original comment claimed 100 while the code uses 400.
fc = tf.layers.dense(flatten, 400, activation=tf.nn.relu)

# Dropout to reduce overfitting.
dropout_fc = tf.layers.dropout(fc, dropout_placeholder)

# Unactivated output layer (logits).
logits = tf.layers.dense(dropout_fc, num_classes)

# FIX: tf.arg_max is a deprecated alias; tf.argmax is the supported name.
predicted_labels = tf.argmax(logits, 1)

# Cross-entropy loss per example.
losses = tf.nn.softmax_cross_entropy_with_logits(
    labels=tf.one_hot(labels_placeholder, num_classes),
    logits=logits)
# Mean loss over the batch.
mean_loss = tf.reduce_mean(losses)

# Optimizer for the loss.
# FIX: minimize the scalar mean_loss (the metric actually reported) instead
# of the per-example loss vector `losses`.
optimizer = tf.train.AdamOptimizer(learning_rate=1e-2).minimize(mean_loss)

# Used to save and restore the model.
saver = tf.train.Saver()

with tf.Session() as sess:
    if train:
        print("训练模式")
        # Training: initialize all graph variables.
        sess.run(tf.global_variables_initializer())
        # Feed the inputs and labels; dropout is 0.25 while training.
        train_feed_dict = {
            datas_placeholder: datas,
            labels_placeholder: labels,
            dropout_placeholder: 0.25
        }
        for step in range(150):
            _, mean_loss_val = sess.run([optimizer, mean_loss], feed_dict=train_feed_dict)
            if step % 10 == 0:
                print("step = {}\tmean loss = {}".format(step, mean_loss_val))
        saver.save(sess, model_path)
        print("训练结束,保存模型到{}".format(model_path))
    else:
        print("测试模式")
        # Testing: restore the previously trained parameters.
        saver.restore(sess, model_path)
        print("从{}载入模型".format(model_path))
        # Mapping from label id to display name (digits 0-9 plus "A").
        label_name_dict = {
            0: "0",
            1: "1",
            2: "2",
            3: "3",
            4: "4",
            5: "5",
            6: "6",
            7: "7",
            8: "8",
            9: "9",
            10: "A"
        }
        # Feed the inputs and labels; dropout is 0 while testing.
        test_feed_dict = {
            datas_placeholder: datas,
            labels_placeholder: labels,
            dropout_placeholder: 0
        }
        predicted_labels_val = sess.run(predicted_labels, feed_dict=test_feed_dict)
        # Print the true label vs. the model's predicted label per file.
        for fpath, real_label, predicted_label in zip(fpaths, labels, predicted_labels_val):
            # Convert label ids to human-readable names.
            real_label_name = label_name_dict[real_label]
            predicted_label_name = label_name_dict[predicted_label]
            print("{}\t{} => {}".format(fpath, real_label_name, predicted_label_name))

 

测试结果:感觉结果还行,只错了一个(“3”被误识别为“2”)。

转载地址:http://qlsws.baihongyu.com/

你可能感兴趣的文章
Hive安装前扫盲之Derby和Metastore
查看>>
永久修改PATH环境变量的几种办法
查看>>
大数据学习之HDP SANDBOX开始学习
查看>>
Hive Beeline使用
查看>>
Centos6安装图形界面(hdp不需要,hdp直接从github上下载数据即可)
查看>>
CentOS7 中把yum源更换成163源
查看>>
关于yum Error: Cannot retrieve repository metadata (repomd.xml) for repository:xxxxxx.
查看>>
linux下载github中的文件
查看>>
HDP Sandbox里面git clone不了数据(HTTP request failed)【目前还没解决,所以hive的练习先暂时搁置了】
查看>>
动态分区最佳实践(一定要注意实践场景)
查看>>
HIVE—索引、分区和分桶的区别
查看>>
Hive进阶总结(听课总结)
查看>>
大数据领域两大最主流集群管理工具Ambari和Cloudera Manger
查看>>
Sqoop往Hive导入数据实战
查看>>
Mysql到HBase的迁移
查看>>
Sqoop import进阶
查看>>
Hive语句是如何转化成MapReduce任务的
查看>>
Hive创建table报错:Permission denied: user=lenovo, access=WRITE, inode="":suh:supergroup:rwxr-xr-x
查看>>
Hive执行job时return code 2排查
查看>>
hive常用函数及数据结构介绍
查看>>