How to generate SSTable files for Cassandra with Hadoop map-reduce?

Time: 2017-02-01 19:57:30

Tags: hadoop cassandra mapreduce bulk-load

I have 500GB of data on HDFS that I need to transfer to a Cassandra cluster. I think the fastest way is to generate sstable files and bulk load them into Cassandra with sstableloader.

Cassandra 3.x provides the client API CQLSSTableWriter to generate sstable files, but it seems to be meant for a single machine and is slow. How can I generate sstables with map-reduce?
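For reference, my single-machine attempt looks roughly like the sketch below (the output directory and sample row are made up; the schema matches the test table I use later):

import java.io.File;
import org.apache.cassandra.io.sstable.CQLSSTableWriter;

public class SingleNodeWriter {
    public static void main(String[] args) throws Exception {
        // CQLSSTableWriter only needs the schema and the insert statement;
        // it writes sstable files locally and never contacts the cluster.
        String schema = "CREATE TABLE yanbo.test ("
                + "id ascii, audience_ids ascii, PRIMARY KEY (id))";
        String insert = "INSERT INTO yanbo.test (id, audience_ids) VALUES (?, ?)";

        CQLSSTableWriter writer = CQLSSTableWriter.builder()
                .inDirectory(new File("/tmp/yanbo/test"))  // example output directory
                .forTable(schema)
                .using(insert)
                .build();

        writer.addRow("id-1", "aud-1,aud-2");              // one call per row
        writer.close();                                    // flush sstables to disk
    }
}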

For comparison: before bulk loading into HBase, HBase provides a tool that generates HBase table-format files from HDFS data via map-reduce (see the sketch below for what I mean). Does Cassandra have a similar approach?
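On the HBase side, the job setup I mean is roughly this (the table name is only an example):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2;
import org.apache.hadoop.mapreduce.Job;

public class HBaseBulkLoadSetup {
    // HFileOutputFormat2 makes a MR job write HFiles, which
    // LoadIncrementalHFiles can then move into the table.
    static void configure(Job job, Configuration conf) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
            TableName tn = TableName.valueOf("my_table");  // example table
            Table table = conn.getTable(tn);
            RegionLocator locator = conn.getRegionLocator(tn);
            HFileOutputFormat2.configureIncrementalLoad(job, table, locator);
        }
    }
}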

********** Added on 2017/02/21 after many attempts **********

I got some hints from the answers, searched a few keywords, and pieced the material together into the test program below, but some configuration or logic still seems to be missing (Error: org.apache.cassandra.exceptions.ConfigurationException: Expecting URI in variable: [cassandra.config], thrown when the reduce phase starts). I suspect that even if I fix this, new problems will appear. So I hope someone can provide a complete CqlBulkOutputFormat sample for hadoop MR, as I guessed at earlier.
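Based on the error text, one thing I plan to try (untested, and the path is only an example) is pointing the reduce JVMs at a local cassandra.yaml from the driver:

// Untested workaround idea: give DatabaseDescriptor a config to load.
// Assumes cassandra.yaml exists at this local path on every worker node.
conf.set("mapreduce.reduce.java.opts",
        "-Dcassandra.config=file:///etc/cassandra/cassandra.yaml");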

My understanding is that generating sstable files in HDFS with hadoop MR should not require credentials for the Cassandra cluster, only the schema and the insert statement. Also, reading HDFS files and generating sstable files does not seem to need both a map and a reduce method. These are my doubts, based on some information I found on the internet.
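If that is true, then perhaps a map-only job would be enough. A sketch of the idea (untested), on top of the driver below:

// Untested map-only variant: skip the shuffle and let each mapper
// write sstables through the output format directly.
job.setNumReduceTasks(0);                            // no reduce phase
job.setOutputFormatClass(CqlBulkOutputFormat.class);
job.setOutputKeyClass(Object.class);                 // key is not used for the row content
job.setOutputValueClass(List.class);                 // value: List<ByteBuffer> of bound params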

Here is my code. There are probably many mistakes in your eyes; it just shows that I have tried.

package hadooptest;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.cassandra.hadoop.ConfigHelper;
import org.apache.cassandra.hadoop.cql3.CqlBulkOutputFormat;
import java.nio.ByteBuffer; // was org.apache.directory.api.util.ByteBuffer, which is the wrong class
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.MultipleOutputs;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class CassandraBulkImporter extends Configured implements Tool {

    private static final String CASSANDRA_KEYSPACE_NAME = "yanbo";
    private static final String CASSANDRA_TABLE_NAME = "test";

    public static void main(String[] args) throws Exception {
        int exitCode = ToolRunner.run(new CassandraBulkImporter(), args);
        System.exit(exitCode);
    }


    public int run(String[] args) throws Exception {
        // Job configuration
        Configuration conf = new Configuration();

        Job job = Job.getInstance(conf, "CassandraBulkImporter");
        job.setJobName(CassandraBulkImporter.class.getName());
        job.setJarByClass(CassandraBulkImporter.class);
        job.setOutputFormatClass(CqlBulkOutputFormat.class);
        // Input format class for the raw HDFS text files
        job.setInputFormatClass(TextInputFormat.class);
        job.setMapperClass(HdfsMapper.class);
        job.setReducerClass(ReducerToCassandra.class);

        job.setMapOutputKeyClass(LongWritable.class);
        job.setMapOutputValueClass(Text.class);

        FileInputFormat.setInputPaths(job, new Path(args[0]));

        ConfigHelper.setOutputInitialAddress(job.getConfiguration(), "10.149.11.15");
        ConfigHelper.setOutputPartitioner(job.getConfiguration(), "Murmur3Partitioner");
        ConfigHelper.setOutputRpcPort(job.getConfiguration(), "9160");
        ConfigHelper.setOutputKeyspace(job.getConfiguration(), CASSANDRA_KEYSPACE_NAME);
        ConfigHelper.setOutputColumnFamily(
                job.getConfiguration(),
                CASSANDRA_KEYSPACE_NAME,
                CASSANDRA_TABLE_NAME);

        // Set the properties for CqlBulkOutputFormat. The schema and insert
        // statement must describe the same keyspace/table configured above
        // (my earlier version used a different keyspace here by mistake).
        String SCHEMA = String.format("CREATE TABLE %s.%s (" +
                        "id ascii, " +
                        "audience_ids ascii, " +
                        "PRIMARY KEY (id) " +
                        ")", CASSANDRA_KEYSPACE_NAME, CASSANDRA_TABLE_NAME);
        String INSERT_STMT = String.format("INSERT INTO %s.%s (" +
                        "id, audience_ids" +
                        ") VALUES (" +
                        "?, ?" +
                        ")", CASSANDRA_KEYSPACE_NAME, CASSANDRA_TABLE_NAME);

        MultipleOutputs.addNamedOutput(job,
                CASSANDRA_TABLE_NAME,
                CqlBulkOutputFormat.class, Object.class, List.class);

        CqlBulkOutputFormat.setTableSchema(
                job.getConfiguration(), CASSANDRA_TABLE_NAME, SCHEMA);

        CqlBulkOutputFormat.setTableInsertStatement(
                job.getConfiguration(), CASSANDRA_TABLE_NAME, INSERT_STMT);

        return job.waitForCompletion(true) ? 0 : 1;
    }

    static class HdfsMapper extends Mapper<LongWritable, Text, LongWritable, Text> {
        @Override
        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // Pass each input line through unchanged; the reducer does the parsing.
            context.write(key, value);
        }
    }

    static class ReducerToCassandra extends Reducer<LongWritable, Text, Object, List<ByteBuffer>> {

        private MultipleOutputs<Object, List<ByteBuffer>> multipleOutputs;

        @Override
        protected void setup(Context context) throws IOException, InterruptedException {
            multipleOutputs = new MultipleOutputs<Object, List<ByteBuffer>>(context);
        }

        @Override
        public void reduce(LongWritable id, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
            for (Text value : values) {
                // Split each \001-delimited line into the bound values of the
                // insert statement, encoded as ByteBuffers.
                List<ByteBuffer> bVariables = new ArrayList<ByteBuffer>();
                for (String cell : value.toString().split("\001")) {
                    bVariables.add(ByteBuffer.wrap(cell.getBytes()));
                }
                multipleOutputs.write(CASSANDRA_TABLE_NAME, null, bVariables);
            }
        }

        @Override
        protected void cleanup(Context context) throws IOException, InterruptedException {
            // MultipleOutputs must be closed, or buffered output may be lost.
            multipleOutputs.close();
        }
    }
}

17/02/20 21:14:25 INFO mapreduce.Job:  map 100% reduce 0%
17/02/20 21:14:35 INFO mapreduce.Job: Task Id : attempt_1487380305027_71639_r_000000_0, Status : FAILED
Error: org.apache.cassandra.exceptions.ConfigurationException: Expecting URI in variable: [cassandra.config]. Please prefix the file with file:/// for local files or file://<server>/ for remote files. Aborting. If you are executing this from an external tool, it needs to set Config.setClientMode(true) to avoid loading configuration.
    at org.apache.cassandra.config.YamlConfigurationLoader.getStorageConfigURL(YamlConfigurationLoader.java:73)
    at org.apache.cassandra.config.YamlConfigurationLoader.loadConfig(YamlConfigurationLoader.java:85)
    at org.apache.cassandra.config.DatabaseDescriptor.loadConfig(DatabaseDescriptor.java:135)
    at org.apache.cassandra.config.DatabaseDescriptor.<clinit>(DatabaseDescriptor.java:119)
    at org.apache.cassandra.hadoop.cql3.CqlBulkRecordWriter.<init>(CqlBulkRecordWriter.java:110)
    at org.apache.cassandra.hadoop.cql3.CqlBulkRecordWriter.<init>(CqlBulkRecordWriter.java:94)
    at org.apache.cassandra.hadoop.cql3.CqlBulkOutputFormat.getRecordWriter(CqlBulkOutputFormat.java:81)
    at org.apache.cassandra.hadoop.cql3.CqlBulkOutputFormat.getRecordWriter(CqlBulkOutputFormat.java:55)
    at org.apache.hadoop.mapred.ReduceTask$NewTrackingRecordWriter.<init>(ReduceTask.java:540)
    at org.apache.hadoop.mapred.ReduceTask.runNewReducer(ReduceTask.java:614)
    at org.apache.hadoop.mapred.ReduceTask.run(ReduceTask.java:389)
    at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:164)
    at java.security.AccessController.doPrivileged(Native Method)
    at javax.security.auth.Subject.doAs(Subject.java:415)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1657)
    at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:158)

0 Answers:

No answers yet.