Dynamic styling in React Native using a configuration file

Date: 2018-07-10 07:00:24

Tags: reactjs react-native react-router react-redux reactive-programming

I plan to use a master configuration file such as app.json, loaded at app startup, containing a key/value entry for the application name. Based on that application name, I need to apply the styles from the style configuration file in that application's directory. I don't need the change to also apply at runtime; it happens just once at startup. Is this possible without using an npm module?

2 answers:

Answer 0 (score: 1)

Create two JS files, e.g. styleSheet1 and styleSheet2, using the following pattern:

'use strict';

var { StyleSheet } = require('react-native');

// Each file exports one stylesheet; the app picks a file at startup.
var myStyle = StyleSheet.create({
  style1: { },
  style2: { }
});

module.exports = myStyle;
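
For instance, styleSheet1.js could carry one app's branding (the property values here are purely illustrative):

'use strict';

var { StyleSheet } = require('react-native');

// styleSheet1.js — hypothetical branding values for App1
module.exports = StyleSheet.create({
  style1: { backgroundColor: '#1e88e5', padding: 16 },
  style2: { color: '#ffffff', fontSize: 18 }
});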

Add a variable to identify the app by name:

var appName = 'App1';
var customStyle;
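
In the question's setup, the name would come from the master configuration file rather than being hardcoded; a minimal sketch, assuming an app.json next to this file with an appName key:

// Metro bundles JSON files, so they can be required directly.
var appConfig = require('./app.json'); // e.g. { "appName": "App1" }
var appName = appConfig.appName;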

Add the following condition:

componentWillMount() {
  // Runs once before the first render, so the choice is made at startup only.
  if (appName === 'App1') {
    customStyle = require('./styleSheet1');
  } else {
    customStyle = require('./styleSheet2');
  }
}

Then, based on the condition, the component will pick up the style:

<View style={customStyle.style1} />
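
Putting the pieces together, a minimal sketch of a component using this pattern (the component name Home is hypothetical):

import React, { Component } from 'react';
import { View } from 'react-native';

const appName = 'App1'; // would come from app.json in the question's setup
let customStyle;

export default class Home extends Component {
  componentWillMount() {
    // Select the stylesheet once, before the first render.
    customStyle = appName === 'App1'
      ? require('./styleSheet1')
      : require('./styleSheet2');
  }

  render() {
    return <View style={customStyle.style1} />;
  }
}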

Hope this helps.

Answer 1 (score: 0)

Yes, you can do this:

Here is a working example (using an imported JS object rather than a plain JSON file): https://snack.expo.io/@zvona/importing-styles
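
The linked Snack imports the styles as a plain JS object; a minimal sketch of that idea (file and key names are illustrative):

// styles.js — a plain exported object; StyleSheet.create is optional here
export default {
  container: {
    flex: 1,
    backgroundColor: '#ffffff'
  }
};

// App.js
import React from 'react';
import { View } from 'react-native';
import styles from './styles';

export default function App() {
  return <View style={styles.container} />;
}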