- android - 多次调用 OnPrimaryClipChangedListener
- android - 无法更新 RecyclerView 中的 TextView 字段
- android.database.CursorIndexOutOfBoundsException : Index 0 requested, 光标大小为 0
- android - 使用 AppCompat 时,我们是否需要明确指定其 UI 组件(Spinner、EditText)颜色
首先,感谢您阅读本文并提供帮助。
我正在尝试使用 tensorflow 中的 conv2d 为 mnist 分类构建输入管道。我的数据集是 kaggle csv 文件,该文件有行:标签,特征 1,特征 2,特征 3,...,特征 784
代码如下:
import numpy as np
import pandas as pd
import time
import tensorflow as tf
from math import ceil
import matplotlib.pyplot as plt
import matplotlib.cm as cm
# --- Run configuration flags --------------------------------------------------
test_state = False          # quick smoke-test run: one epoch, no submission
submission_state = True     # train on the full set and emit a Kaggle submission
continue_training = True    # restore the latest checkpoint instead of re-initializing
verbose = False             # enable extra shape/debug printing
run_n = 1                   # run index used to namespace checkpoints/summary dirs
if test_state:
    epochs = 1
    submission_state = False
else:
    epochs = 33
batch_size = 128
save_freq = 1000            # checkpoint frequency (in training iterations)
summaries_freq = 33         # TensorBoard summary frequency (in iterations)
save_dir = "./save/"
save_name = "cnn.ckpt"
summaries_train_dir = './summaries/train/train{0}'.format(run_n)
summaries_eval_dir = './summaries/eval/eval{0}'.format(run_n)
if submission_state:
    # Submission runs keep their artifacts separate from plain training runs.
    save_dir = "./savesubmission/"
    save_name = "cnnsubmit{0}.ckpt".format(run_n)
    summaries_train_dir = './summaries/train/submit{0}'.format(run_n)
submit_file = "MNIST_Kaggle_submission_file{0}.csv".format(run_n)
#DEBUG
display_img = False         # show a few sample digits before training
n_display = 5               # how many samples display() shows
##############################################FUNÇOES AUXILIARES########################################################
def get_data(file, verbose=False):
    """Load a Kaggle-MNIST style CSV (one row = label, pixel0..pixel783).

    Args:
        file: path or file-like object accepted by pandas.read_csv.
        verbose: print shape diagnostics while loading.

    Returns a dict with:
        X: float images normalized to [0.0, 1.0], shaped (n, 28, 28, 1)
        Y: one-hot uint8 label matrix, shaped (n, n_classes)
        n_samples / n_features / n_classes: convenience ints
    """
    data = pd.read_csv(file)
    if verbose:
        print('data({0[0]},{0[1]})'.format(data.shape))  # (n_samples, 1 + n_pixels)
    # Everything after the first column is pixel data.
    images = data.iloc[:, 1:].values
    images = images.astype(float)  # np.float was removed in NumPy 1.20
    images = np.multiply(images, 1.0 / 255.0)  # normalize [0, 255] -> [0.0, 1.0]
    image_size = images.shape[1]
    # MNIST images are square, so width == height == sqrt(n_pixels).
    image_width = image_height = np.ceil(np.sqrt(image_size)).astype(np.uint8)
    if verbose:
        print('image_width => {0}\nimage_height => {1}'.format(image_width, image_height))
    images = images.reshape([-1, image_width, image_height, 1])
    # First column holds the class labels. Select it positionally so it works
    # regardless of the header name; the original data[[0]] raised KeyError
    # when the CSV header names the column 'label' (as Kaggle's does).
    labels = data.iloc[:, 0].values.ravel()
    n_classes = np.unique(labels).shape[0]
    n_labels = labels.shape[0]
    # Vectorized one-hot encoding: flat index of the hot entry for each row.
    index_offset = np.arange(n_labels) * n_classes
    labels_one_hot = np.zeros((n_labels, n_classes))
    labels_one_hot.flat[index_offset + labels.ravel()] = 1
    labels_one_hot = labels_one_hot.astype(np.uint8)
    if verbose:
        print("X shape: {0}, Y shape: {1}".format(images.shape, labels_one_hot.shape))
    return dict(
        Y=labels_one_hot,
        X=images,
        n_samples=int(data.shape[0]),
        n_features=int(images.shape[1]),  # NOTE: image width after reshape, not pixel count
        n_classes=int(n_classes)
    )
def split_data(dataset, eval_size=0.2, verbose=False):
    """Split a get_data()-style dict into train and eval partitions.

    Args:
        dataset: dict with 'X', 'Y' and 'n_samples' (as built by get_data).
        eval_size: fraction of samples held out for evaluation; the eval
            partition is the leading slice, the remainder trains.
        verbose: print partition shapes.

    Returns a dict with train X/Y, eval X_eval/Y_eval and size metadata.
    """
    n_eval = int(dataset['n_samples'] * eval_size)
    # First n_eval samples become the eval set; the rest is for training.
    eval_images = dataset['X'][:n_eval]
    eval_labels = dataset['Y'][:n_eval]
    train_images = dataset['X'][n_eval:]
    train_labels = dataset['Y'][n_eval:]
    if verbose:
        print('train images shape: {0}'.format(train_images.shape))
        print('eval images shape: {0}'.format(eval_images.shape))
    return dict(
        Y=train_labels,
        X=train_images,
        Y_eval=eval_labels,
        X_eval=eval_images,
        n_samples=int(train_images.shape[0]),
        n_eval=int(eval_images.shape[0]),
        n_features=int(train_images.shape[1]),
        n_classes=int(train_labels.shape[1])
    )
def iterator(data, batch_index, batch_size, shuffle):
    """Yield a single (X, Y) batch from `data`.

    shuffle=True: sample `batch_size` random indices (with replacement);
    `batch_index` is ignored in that case.
    shuffle=False: return the `batch_index`-th sequential batch; the final
    batch carries the remainder when n_samples is not divisible by
    batch_size. (The original also applied math.ceil to an int modulo,
    which was a no-op, and could yield an empty final batch — both fixed.)
    """
    if shuffle:
        index = np.random.choice(data['n_samples'], batch_size)
    elif (batch_index + 1) > (data['n_samples'] // batch_size):
        # Final partial batch: the n_samples % batch_size leftover rows.
        start = (data['n_samples'] // batch_size) * batch_size
        index = np.arange(start, start + (data['n_samples'] % batch_size))
    else:
        index = np.arange(batch_index * batch_size, (batch_index + 1) * batch_size)
    yield (data['X'][index], data['Y'][index])

def get_epoch(data, epochs, batch_size, shuffle):
    """Yield one single-batch `iterator` generator per batch, over `epochs`.

    Uses ceil(n_samples / batch_size) batches per epoch, so no empty
    trailing batch is produced when n_samples divides batch_size evenly
    (the original `n_samples // batch_size + 1` always emitted one extra,
    empty, batch in that case).
    """
    n_batches = -(-data['n_samples'] // batch_size)  # ceil division
    for _ in range(epochs):
        for batch_index in range(n_batches):
            yield iterator(data, batch_index, batch_size, shuffle)
def display(X, label):
    """Plot the first `n_display` images of X, titled with their labels.

    `label` is a one-hot matrix; the title is its argmax for each row.
    Uses the module-level `n_display` count and matplotlib globals.
    """
    idx = 0
    while idx < n_display:
        digit = X[idx].reshape(28, 28)
        plt.axis('off')
        plt.imshow(digit, cmap=cm.binary)
        plt.title(str(np.argmax(label[idx])))
        plt.show()
        idx += 1
##################################################CNN GRAPH#############################################################
def reset_graph():
    """Close any live module-level `sess` and clear the default TF graph."""
    existing = globals().get('sess')
    if existing:
        existing.close()
    tf.reset_default_graph()
def model():
    """Build the MNIST CNN graph: 3 conv/pool blocks + 1 FC layer + softmax.

    Returns a dict exposing the placeholders and ops the train/eval/predict
    loops feed and fetch. Also registers `train_step` and `accuracy` in TF
    collections and merges all summaries.
    NOTE(review): written against pre-1.0 TensorFlow APIs (`tf.sub`,
    positional softmax_cross_entropy_with_logits) — confirm the installed
    version supports them.
    """
    reset_graph()
    # Inputs: 28x28x1 grayscale images and one-hot labels over 10 digits.
    X = tf.placeholder(tf.float32, [None, 28, 28, 1])
    Y = tf.placeholder(tf.float32, [None, 10])
    dropout = tf.placeholder(tf.float32)  # keep-probability, not drop rate
    with tf.name_scope('trainning_time'):
        time = tf.placeholder(tf.float32)  # wall-clock seconds (summaries only)
    with tf.name_scope('mi_params'):
        # Inputs for the loss-plateau learning-rate decay: a history of
        # recent losses and the averaging window length.
        window = tf.placeholder(tf.int32)
        loss = tf.placeholder(tf.float32, [None])
    # One keep-probability shared by conv and fully-connected dropout.
    p_keep_conv = p_keep_hidden = dropout
    with tf.variable_scope('weights') as scope:
        w1a = init_var('w1a', [3, 3, 1, 8]) # 3x3x1 conv, 8 outputs
        B1a = init_var('B1a', [8])
        w1b = init_var('w1b', [3, 3, 8, 16]) # 3x3x8 conv, 16 outputs
        B1b = init_var('B1b', [16])
        w2a = init_var('w2a', [3, 3, 16, 32]) # 3x3x16 conv, 32 outputs
        B2a = init_var('B2a', [32])
        w3a = init_var('w3a', [3, 3, 32, 64]) # 3x3x32 conv, 64 outputs
        B3a = init_var('B3a', [64])
        # FC: 64*4*4 inputs (28 -> 14 -> 7 -> 4 after three SAME pools), 1024 outputs.
        w4 = init_var('w4', [64 * 4 * 4, 1024])
        B4 = init_var('B4' ,[1024])
        w_o = init_var('w_o', [1024, 10]) # FC 1024 inputs, 10 outputs (labels)
        B_o = init_var('B_o', [10])
        # Decay multiplier for the learning rate.
        # NOTE(review): tf.Variable's second positional argument is
        # `trainable`, not a name — 'adami' is passed as trainable here
        # (truthy). Confirm intent.
        adami = tf.Variable(tf.constant(0.999999, dtype=tf.float32), 'adami')
    with tf.variable_scope('cells') as scope:
        # Block 1: two 3x3 ELU convs, then 2x2 max-pool (28x28 -> 14x14), dropout.
        l1a = tf.nn.elu(tf.add(tf.nn.conv2d(X, w1a,
                                            strides=[1, 1, 1, 1], padding='SAME'), B1a))
        l1b = tf.nn.elu(tf.add(tf.nn.conv2d(l1a, w1b,
                                            strides=[1, 1, 1, 1], padding='SAME'), B1b))
        l1 = tf.nn.max_pool(l1b, ksize=[1, 2, 2, 1],
                            strides=[1, 2, 2, 1], padding='SAME')
        l1 = tf.nn.dropout(l1, p_keep_conv)
        # Block 2: one conv, pool (14x14 -> 7x7), dropout.
        l2a = tf.nn.elu(tf.add(tf.nn.conv2d(l1, w2a,
                                            strides=[1, 1, 1, 1], padding='SAME'), B2a))
        l2 = tf.nn.max_pool(l2a, ksize=[1, 2, 2, 1],
                            strides=[1, 2, 2, 1], padding='SAME')
        l2 = tf.nn.dropout(l2, p_keep_conv)
        # Block 3: one conv, pool (7x7 -> 4x4 with SAME padding), dropout.
        l3a = tf.nn.elu(tf.add(tf.nn.conv2d(l2, w3a,
                                            strides=[1, 1, 1, 1], padding='SAME'), B3a))
        l3 = tf.nn.max_pool(l3a, ksize=[1, 2, 2, 1],
                            strides=[1, 2, 2, 1], padding='SAME')
        l3 = tf.nn.dropout(l3, p_keep_conv)
        # Flatten for the fully-connected layer.
        l3_plain = tf.reshape(l3, [-1, w4.get_shape().as_list()[0]])
        l4 = tf.nn.elu(tf.add(tf.matmul(l3_plain, w4), B4))
        l4 = tf.nn.dropout(l4, p_keep_hidden)
        logits = tf.add(tf.matmul(l4, w_o), B_o)
        prediction = tf.argmax(tf.nn.softmax(logits), 1)  # predicted digit per row
    with tf.name_scope('total_loss'):
        # NOTE(review): positional (logits, labels) is the pre-1.0 signature;
        # later TF versions require keyword labels=/logits=.
        total_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits, Y))
    with tf.name_scope('learning_rate'):
        # Base rate 7e-4 scaled by the decaying `adami` multiplier,
        # floored away from exact zero.
        learning_rate = tf.add(tf.nn.relu(tf.multiply(7e-4, adami)), 1e-12)
    def adami_down(adami, learning_rate):
        # Shrink the multiplier and return its post-assignment value.
        # NOTE(review): tf.sub was renamed tf.subtract in TF 1.0.
        new = tf.add(tf.nn.relu(tf.sub(adami, tf.multiply(adami, learning_rate))), 1e-6)
        with tf.control_dependencies([tf.assign(adami, new)]):
            return tf.identity(adami)
    # Decay the multiplier when the recent loss average stopped improving
    # versus the preceding window.
    # NOTE(review): slicing a placeholder with tensor-valued negative bounds
    # (loss[-window:]) is fragile in TF 1.x — verify it evaluates as intended.
    adami = tf.cond(tf.reduce_mean(loss[-window:]) >= tf.reduce_mean(loss[-(2 * window):-(window)]),
                    lambda: adami_down(adami, learning_rate), lambda: tf.identity(adami))
    train_step = tf.train.AdamOptimizer(learning_rate).minimize(total_loss)
    tf.add_to_collection('train_step', train_step)
    with tf.name_scope('correct_prediction'):
        correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(Y, 1))
    with tf.name_scope('accuracy'):
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    tf.add_to_collection('accuracy', accuracy)
    # TensorBoard: weight/activation histograms and scalar training stats.
    tf.summary.histogram('w1a', w1a)
    tf.summary.histogram('B1a', B1a)
    tf.summary.histogram('w1b', w1b)
    tf.summary.histogram('B1b', B1b)
    tf.summary.histogram('w2a', w2a)
    tf.summary.histogram('B2a', B2a)
    tf.summary.histogram('w3a', w3a)
    tf.summary.histogram('B3a', B3a)
    tf.summary.histogram('w_o', w_o)
    tf.summary.histogram('B_o', B_o)
    tf.summary.histogram('l1a', l1a)
    tf.summary.histogram('l1b', l1b)
    tf.summary.histogram('l1_pool', l1)
    tf.summary.histogram('l2a', l2a)
    tf.summary.histogram('l2_pool', l2)
    tf.summary.histogram('l3a', l3a)
    tf.summary.histogram('l3_pool', l3)
    tf.summary.histogram('l4_fully', l4)
    # Kept separately so the training loop can emit it every iteration.
    elapsed_time = tf.summary.scalar('trainning_time', time)
    tf.summary.scalar('total_loss', total_loss)
    tf.summary.scalar('accuracy', accuracy)
    tf.summary.scalar('learning_rate', learning_rate)
    tf.summary.scalar('adami', adami)
    tf.summary.scalar('dropout_keep_probability', dropout)
    summary_op = tf.summary.merge_all()
    return dict(
        x = X,
        y = Y,
        learning_rate = learning_rate,
        pred = prediction,
        train_step = train_step,
        total_loss = total_loss,
        accuracy = accuracy,
        adami = adami,
        window = window,
        loss = loss,
        dropout = dropout,
        time = time,
        elapsed_time = elapsed_time,
        summaries = summary_op,
        saver=tf.train.Saver()
    )
def init_var(name, shape):
    """Create (or fetch) a variable with Xavier/Glorot initialization."""
    xavier = tf.contrib.layers.xavier_initializer()
    return tf.get_variable(name, shape, initializer=xavier)
################################################# TRAINING #############################################################
def train_network(data, g, epochs, batch_size, save, verbose=True):
    """Train the CNN graph dict `g` on `data`, checkpointing to `save`.

    Relies on module-level globals: continue_training, save_dir,
    summaries_train_dir, summaries_freq, save_freq.
    Returns [training_losses, accu], the per-iteration loss/accuracy lists.
    NOTE(review): each item yielded by get_epoch is a single batch, so
    `idx` below counts iterations, not epochs, despite the naming.
    """
    with tf.Session(graph=tf.get_default_graph()) as sess:
        # NOTE(review): tf.train.SummaryWriter is the pre-1.0 API
        # (later tf.summary.FileWriter) — confirm the TF version in use.
        train_writer = tf.train.SummaryWriter(summaries_train_dir,
                                              sess.graph)
        if(continue_training == False):
            # Fresh run: initialize variables and drop an initial checkpoint.
            sess.run(tf.global_variables_initializer())
            print("Model created.")
            if isinstance(save, str):
                g['saver'].save(sess, save)
                print('Model Saved.')
        else:
            # Resume: restore the latest checkpoint found in save_dir.
            new_saver = tf.train.import_meta_graph('{0}.meta'.format(tf.train.latest_checkpoint('{0}'.format(save_dir))))
            new_saver.restore(sess, tf.train.latest_checkpoint('{0}'.format(save_dir)))
            print("Model Restored.")
            print("Resuming Training.")
        sess.graph.finalize()  # fail fast on accidental graph growth
        training_losses = []
        accu = []
        window = int(42)  # loss-window length fed to the graph's adami schedule
        # Seed the loss history so the graph's loss[-2*window:] slices are
        # full-length from the very first iteration.
        for k in range(2 * window):
            training_losses.append(2.5)
        for idx, epoch in enumerate(get_epoch(data, epochs, batch_size, shuffle=True)):
            t = time.time()
            training_loss = 0
            accuracy = 0
            steps = 0
            for X, Y in epoch:
                feed_dict={g['x']: X,
                           g['y']: Y,
                           g['window']: window,
                           g['loss']: training_losses,
                           g['dropout']: 0.72,
                           g['time']: time.time() - t,
                           }
                training_loss_ , _, accuracy_, adami, learning_rate = sess.run([g['total_loss'],
                                                                                g['train_step'],
                                                                                g['accuracy'],
                                                                                g['adami'],
                                                                                g['learning_rate']],
                                                                               feed_dict)
                training_loss += training_loss_
                accuracy += accuracy_
                # Emit the wall-clock-time summary every iteration.
                elapsed_time = sess.run(g['elapsed_time'], feed_dict={g['time']: time.time() - t})
                train_writer.add_summary(elapsed_time, global_step=int((idx * batch_size) + steps))
                if(idx%summaries_freq == 0):
                    # Full summary pass (histograms + scalars), throttled.
                    summary = sess.run(g['summaries'], feed_dict)
                    train_writer.add_summary(summary, global_step=int((idx * batch_size) + steps))
                steps += 1
            training_losses.append(training_loss/steps)
            accu.append(accuracy/steps)
            if(idx%save_freq == 0 and idx != 0):
                if isinstance(save, str):
                    g['saver'].save(sess, save, global_step=idx)
                    print('Model Saved.')
            n_iterations = ((epochs * data['n_samples']) // batch_size)
            if verbose:
                # ETA extrapolated from this iteration's duration.
                print("Training loss for iteration {0}/{1}: {2}, Learning Rate:{6}, AdaMi: {8},"
                      " Accuracy: {7} %, ETC: {3}:{4}:{5}".format(idx,
                                                                  n_iterations,
                                                                  training_loss / steps,
                                                                  int(((time.time() - t) * (n_iterations - idx + 1)) // 3600),
                                                                  int((((time.time() - t) * (n_iterations - idx + 1)) % 3600) // 60),
                                                                  int((((time.time() - t) * (n_iterations - idx + 1)) % 3600) % 60),
                                                                  learning_rate,
                                                                  (accuracy / steps) * 100,
                                                                  adami))
                print("It took {0} seconds to train this epoch.".format(time.time() - t))
        # Final checkpoint after the loop completes.
        g['saver'].save(sess, save)
        print('Model Saved.')
        return [training_losses, accu]
# output image
# Optionally show a few input digits before training.
# NOTE(review): `data` is only defined further down — if display_img were
# True this would raise NameError; presumably `mnist` was intended.
if(display_img == True):
    for i in range(n_display):
        display(data['X'], data['Y'])
g = model()  # build the graph before loading data
t = time.time()
mnist = get_data(file='train.csv')
if (verbose == True):
    # Print label-matrix shape for a sanity check.
    print('labels shape: ({0} samples, {1} classes)'.format(mnist['n_samples'], mnist['n_classes']))
    # Print image-tensor shape for a sanity check.
    print(
        'images shape: ({0} samples, {1[1]} x {1[2]} x {1[3]} pixels)'.format(mnist['n_samples'], mnist['X'].shape))
# Submission runs train on everything; otherwise hold out an eval split.
if not submission_state:
    data = split_data(mnist)
if submission_state:
    data = mnist
print("Starting Trainning")
# Train; checkpoint path is save_dir + save_name.
losses , accuracy = train_network(data, g, epochs ,batch_size, save="{0}{1}".format(save_dir,save_name))
print("It took {0} seconds to train for {1} epochs.".format(time.time()-t, epochs))
print("The average loss on the final epoch was:", np.mean(losses[-1]))
##################################################### EVAL #############################################################
def eval_network(data, g, batch_size, verbose):
    """Run one evaluation pass of the restored model over `data`.

    Restores the latest checkpoint from the module-level save_dir and
    returns [eval_losses, accu].
    NOTE(review): uses the pre-1.0 tf.train.SummaryWriter API.
    """
    with tf.Session(graph=tf.get_default_graph()) as sess:
        eval_writer = tf.train.SummaryWriter(summaries_eval_dir)
        new_saver = tf.train.import_meta_graph('{0}.meta'.format(tf.train.latest_checkpoint('{0}'.format(save_dir))))
        new_saver.restore(sess, tf.train.latest_checkpoint('{0}'.format(save_dir)))
        print("Model Restored.")
        sess.graph.finalize()
        eval_losses = []
        accu = []
        window = int(7)  # loss-window length fed to the graph's adami schedule
        # Pad the loss history so loss[-2*window:] slices are full-length.
        for k in range(2 * window):
            eval_losses.append(0)
        for idx, epoch in enumerate(get_epoch(data, 1, batch_size, shuffle=False)):
            eval_loss = 0
            steps = 0
            accuracy = 0
            t = time.time()
            for X, Y in epoch:
                # dropout 1.0 disables dropout for evaluation.
                feed_dict={g['x']: X,
                           g['y']: Y,
                           g['window']: window,
                           g['loss']: eval_losses,
                           g['dropout']: 1.0,
                           g['time']: time.time() - t,
                           }
                eval_loss_ , pred , accuracy_ = sess.run([g['total_loss'],
                                                          g['pred'],
                                                          g['accuracy']],
                                                         feed_dict)
                if(idx % summaries_freq == 0):
                    summary = sess.run(g['summaries'], feed_dict)
                    eval_writer.add_summary(summary, global_step=int((idx * batch_size) + steps))
                eval_loss += eval_loss_
                accuracy += accuracy_
                steps += 1
            if verbose:
                print("Average eval loss: {0}, Accuracy: {1} % ".format(eval_loss/steps, (accuracy/steps) * 100))
                print("It took", time.time() - t, "seconds to eval this epoch.")
            eval_losses.append(eval_loss/steps)
            accu.append(accuracy/steps)
        return [eval_losses, accu]
t = time.time()
g = model()  # rebuild a fresh graph; weights come from the checkpoint
if not submission_state:
    # Evaluate on the held-out split produced by split_data.
    # NOTE(review): in submission mode `data` is still the full training
    # set, so this "eval" measures training accuracy.
    data = dict(X = data['X_eval'], Y = data['Y_eval'], n_samples = data['n_eval'])
print("Evaluating NN")
losses, accuracy = eval_network(data, g, batch_size, verbose)
print("It took {0} seconds to eval".format(time.time() - t,))
print("The average loss was: {0}, and the accuracy was: {1} %".format(np.mean(losses), np.mean(accuracy) * 100))
########################################## PREDICT AND SAVE FOR SUBMISSION #############################################
def get_pred_data(file, verbose=False):
    """Load an unlabeled Kaggle-MNIST test CSV (pixel columns only).

    Args:
        file: path or file-like object accepted by pandas.read_csv.
        verbose: print shape diagnostics while loading.

    Returns a dict with:
        X: float images normalized to [0.0, 1.0], shaped (n, 28, 28, 1)
        Y: all-zero dummy label matrix of shape (n, 10) so the same
           batching code works as for labeled data
        n_samples / n_features: convenience ints
    """
    data = pd.read_csv(file)
    if verbose:
        print('data({0[0]},{0[1]})'.format(data.shape))  # (n_samples, n_pixels)
    # Every column is pixel data — there is no label column here.
    images = data.values
    images = images.astype(float)  # np.float was removed in NumPy 1.20
    images = np.multiply(images, 1.0 / 255.0)  # normalize [0, 255] -> [0.0, 1.0]
    image_size = images.shape[1]
    # MNIST images are square, so width == height == sqrt(n_pixels).
    image_width = image_height = np.ceil(np.sqrt(image_size)).astype(np.uint8)
    if verbose:
        print('image_width => {0}\nimage_height => {1}'.format(image_width, image_height))
    images = images.reshape([-1, image_width, image_height, 1])
    # Dummy labels keep the feed-dict/batching code paths uniform.
    labels = np.zeros((int(data.shape[0]), 10))
    if verbose:
        print("X shape: {0}".format(images.shape))
    return dict(
        X=images,
        Y=labels,
        n_samples=int(data.shape[0]),
        n_features=int(images.shape[1]),  # NOTE: image width after reshape, not pixel count
    )
def predict_logits(data, g, batch_size, verbose):
    """Restore the latest checkpoint and predict a label for every sample.

    Returns a pandas DataFrame of predicted digits indexed 1..n
    (Kaggle's ImageId convention).
    """
    with tf.Session(graph=tf.get_default_graph()) as sess:
        new_saver = tf.train.import_meta_graph('{0}.meta'.format(tf.train.latest_checkpoint('{0}'.format(save_dir))))
        new_saver.restore(sess, tf.train.latest_checkpoint('{0}'.format(save_dir)))
        print("Model Restored.")
        sess.graph.finalize()
        predictions = []
        index = []
        for idx, epoch in enumerate(get_epoch(data, 1, batch_size, shuffle=False)):
            steps = 0
            t = time.time()
            for X, Y in epoch:
                # Labels are unused at prediction time; dropout disabled (1.0).
                feed_dict={g['x']: X,
                           g['dropout']: 1.0,
                           }
                pred = sess.run(g['pred'], feed_dict)
                predictions.extend(pred.ravel())
                steps += 1
            if verbose:
                print("It took", time.time() - t, "seconds to make the predictions.")
        # Build a 1-based index alongside the flat prediction list.
        answer = []
        for idx, value in enumerate(predictions):
            index.append(idx + 1)
            answer.append(value)
        predictions = pd.DataFrame(answer, index=index)
        print("Predictions:\n Index, Label\n", predictions)
        return predictions
# Predict labels for the Kaggle test set and write the submission CSV.
if submission_state or test_state:
    mnist = get_pred_data(file='test.csv')
    if(verbose == True):
        # Print shapes for a sanity check.
        print('labels shape: ({0} samples)'.format(mnist['n_samples']))
        print('images shape: ({0} samples, {1[1]} x {1[2]} x {1[3]} pixels)'.format(mnist['n_samples'], mnist['X'].shape))
    t = time.time()
    g = model()  # fresh graph; weights restored from the checkpoint
    print("Making Predictions")
    predictions = predict_logits(mnist, g, batch_size, verbose)
    print("It took {0} seconds to predict logits".format(time.time() - t,))
    print("Saving predictions to csv file.")
    # Kaggle submission format: ImageId,Label.
    predictions.to_csv(submit_file, index_label=['ImageId'], header=['Label'])
    print("File saved.")
reset_graph()
发生的事情就是它挂起:
training_loss_ , _, accuracy_, adami, learning_rate = sess.run([g['total_loss'],
g['train_step'],
g['accuracy'],
g['adami'],
g['learning_rate']],
feed_dict)
我可以在张量板上看到我的队列没有被填满,但我找不到原因。
任何帮助将不胜感激!!!
最佳答案
标准 MNIST 图像具有 28 x 28 = 784 像素。假设 CSV 文件中的每一列都是一个不同的像素,加上标签的一列,代码中的以下行:
for column in range(749):
……应该是:
for column in range(785):
关于python - Queue.dequeue 卡在 Tensorflow 输入管道中,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/41071395/
在 Android 的 API > 19 中是否有任何方法可以获取可移动 SD 卡的路径? 与外部 SD 卡一样,我们有 Environment.getExternalStorageDirectory
一些 Android 设备有 microSD(或其他存储卡)插槽,通常安装为 /storage/sdcard1 据我所知,自 Android 4.4 起 Google 限制了对此内存的访问,并在 An
我使用 Java Card 2.1.2 SDK 和 GPShell 作为与设备通信的方式在 Java Card 上构建一个项目。我从 GpShell 测试了 helloworld 示例,并成功发送了
我开发了一个应用程序,它有一个来电接收器,它适用于所有手机。一位用户有一部双 SIM 卡安卓手机。该应用程序适用于第一张 SIM 卡。但是当有人调用他的第二张 SIM 卡时,我们的应用程序不会被调用。
我有一个带预览的文件输入。 这是笔 Codepen 我想强制高度,我无法理解我该怎么做。我想将此组件的高度固定为 300px(示例),我还需要保持加载图像的正确纵横比,用灰色背景填充空白。现在我保持宽
关闭。这个问题不符合Stack Overflow guidelines .它目前不接受答案。 想改进这个问题?将问题更新为 on-topic对于堆栈溢出。 6年前关闭。 Improve this qu
我正在使用此代码访问 SD card : import os from os.path import join from jnius import autoclass #from android.pe
我正在为数据记录设备编写固件。它以 20 Hz 的频率从传感器读取数据并将数据写入 SD 卡。但是,向SD卡写入数据的时间并不一致(大约200-300 ms)。因此,一种解决方案是以一致的速率将数据写
我正在使用以下代码将视频放到网站上,但是在垂直方向上,手机屏幕上只能看到视频的左半部分 我不是网络开发人员。有人可以告诉我确切的内容吗,如何使其正确放置在手机屏幕上? 是在youtube iframe
我正在使用 Vuetify 1.5 和 Vuetify 网格系统来设置我的布局。现在我有一个组件 HelloWorld我将其导入到我的 Parent 中成分。我已经在我的 HelloWorld 中设置
我使用 python 制作了一个简单的二十一点游戏。我制作了游戏的其余部分,但我正在努力放入 ASCII 卡,所以这只是代码的一小部分。我尝试将 * len(phand) 放在附加行的末尾。虽然这确实
我正在使用玩家卡设置 Twitter 卡。它可以在预览工具中运行,但文档说它需要在“twitter.com 现代桌面浏览器? native iOs 和 Android Twitter 应用程序?mob
任何旧的 GSM 兼容 SIM 卡(3G USIM 的奖励)。 我想我需要一些硬件?谁能为业余爱好者推荐一些便宜的东西,以及一些更专业的东西? 我认为会有一个带有硬件的 API 的完整文档,所以也许这
我使用 python 制作了一个简单的二十一点游戏。我制作了游戏的其余部分,但我正在努力放入 ASCII 卡,所以这只是代码的一小部分。我尝试将 * len(phand) 放在附加行的末尾。虽然这确实
我记得前一段时间读到有 cpu 卡供系统添加额外的处理能力来进行大规模并行化。任何人都有这方面的经验和任何资源来研究项目的硬件和软件方面吗?这项技术是否不如传统集群?它更注重功率吗? 最佳答案 有两个
我检查外部存储是否已安装并且可用于读/写,然后从中读取。我使用的是确切的官方 Android 示例代码 ( from here )。 它说外部存储未安装。 getExternalFilesDir(nu
在 Android 2.1 及更低版本中,Android 应用程序可以请求下载到 SD 卡上吗?另外我想知道应用程序是否可以请求一些包含视频的文件夹下载到 SD 卡上?以及如何做到这一点? 提前致谢。
我们编写了一个 Windows 设备驱动程序来访问我们的自定义 PCI 卡。驱动程序使用 CreateFile 获取卡的句柄。 我们最近在一次安装中遇到了问题,卡似乎停止工作了。我们尝试更换卡(更换似
有些新设备(例如 Samsung Galaxy)带有两个 SD 卡。我想知道是否有任何方法可以确定设备是否有两张 SD 卡或一张 SD 卡。谢谢 最佳答案 我认为唯一的方法是使用 检查可用根的列表 F
我正在尝试将文件读/写到 SD 卡。我已经尝试在我的真实手机和 Eclipse 中的模拟器上执行此操作。在这两种设备上,对/mnt/sdcard/或/sdcard 的权限仅为“d--------”,我
我是一名优秀的程序员,十分优秀!