Setting the value of a Switch widget inside the rows of a DataTable

Date: 2018-09-30 10:57:30

Tags: flutter

I want to place a Switch widget inside the rows of a DataTable, but there is a problem: since I am not familiar with Flutter syntax, I don't know where I should set the value of the Switch widget.

import 'package:flutter/material.dart';

class DevicePage extends StatefulWidget {
  @override
  DevicePageState createState() {
    return new DevicePageState();
  }
}

class DevicePageState extends State<DevicePage> {
  bool _value = false;
  int index;

  Widget bodyData() => DataTable(
        columns: <DataColumn>[
          DataColumn(
            label: Text(
              'وضعیت',
              style: TextStyle(
                  color: Colors.deepPurple,
                  fontWeight: FontWeight.bold,
                  fontSize: 13.0),
            ),
            numeric: false,
            onSort: (i, b) {},
            tooltip: "to display first name of the name",
          ),
          DataColumn(
            label: Text(
              'عملکرد',
              style: TextStyle(
                  color: Colors.deepPurple,
                  fontWeight: FontWeight.bold,
                  fontSize: 13.0),
            ),
            numeric: false,
            onSort: (i, b) {},
            tooltip: "to display last name of the name",
          ),
        ],
        rows: names
            .map((name) => DataRow(
                  cells: [
                    DataCell(
                      // Every row currently reads and writes the same _value field.
                      Switch(
                        value: _value,
                        onChanged: (bool value) {
                          _onChanged(value);
                        },
                      ),
                    ),
                    DataCell(
                      Text(
                        name.lastName,
                        style: TextStyle(
                            color: Colors.blueAccent,
                            fontWeight: FontWeight.bold,
                            fontSize: 12.0),
                      ),
                      showEditIcon: false,
                      placeholder: false,
                    ),
                  ],
                ))
            .toList(),
      );

  void _onChanged(bool value) {
    setState(() {
      _value = value;
    });
  }

  @override
  Widget build(BuildContext context) => bodyData();
}

And thanks for your help ;)

1 Answer:

Answer 0 (score: 0):


You may want to call setState so the change is actually reflected:

Switch(
  value: _switchValue,
  onChanged: (bool value) {
    setState(() {
      _switchValue = value;
    });
  },
)
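
Note that with a single field such as _value or _switchValue, every row's Switch shares the same state, so toggling one switch flips all of them. Below is a minimal sketch of one way to give each row its own value by keeping the switch states in a map keyed by the row item; the _switchValues map is a hypothetical name introduced here, while names and lastName are taken from the question's snippet:

class DevicePageState extends State<DevicePage> {
  // One boolean per row, keyed by the row's lastName; missing keys default to false.
  final Map<String, bool> _switchValues = {};

  Widget bodyData() => DataTable(
        columns: <DataColumn>[
          DataColumn(label: Text('وضعیت')),
          DataColumn(label: Text('عملکرد')),
        ],
        rows: names
            .map((name) => DataRow(
                  cells: [
                    DataCell(
                      Switch(
                        // Each row reads and writes only its own entry.
                        value: _switchValues[name.lastName] ?? false,
                        onChanged: (bool value) {
                          setState(() {
                            _switchValues[name.lastName] = value;
                          });
                        },
                      ),
                    ),
                    DataCell(Text(name.lastName)),
                  ],
                ))
            .toList(),
      );

  @override
  Widget build(BuildContext context) => bodyData();
}

Keying the map on name.lastName only works if the last names are unique; keying on the row index or on an id field of the row model would be more robust in practice.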