Synchronizing audio with lyrics

Asked: 2014-08-10 11:22:37

Tags: android audio android-mediaplayer

I am trying to synchronize an audio file with a lyrics file (held as a String). After following many posts on SO, I tried using speech-to-text to extract the text from the audio, but it does not listen to the audio at all; it just waits for me to speak. The lyrics file also needs to be converted into an SRT file that matches the song. How can I do the following?

  1. Use speech-to-text to listen to an audio file? Is that even the right approach?
  2. How do I convert the lyrics file (a plain string) into an SRT file? (A rough sketch of the conversion I have in mind follows the code below.)
  3. Any suggestions or pointers would be greatly appreciated.

The code below waits for me to say something into the microphone, but I need it to listen to the audio file that is playing. This is what I have tried so far:

    private static final String TAG = "LyricsSync";   // log tag used by the Log.d calls below
    private SpeechRecognizer sr;                       // recognizer shared by onCreate, the listener and onSongPicked

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        sr = SpeechRecognizer.createSpeechRecognizer(this);
        sr.setRecognitionListener(new listener());
    }

     class listener implements RecognitionListener
       {
                public void onReadyForSpeech(Bundle params)
                {
                         Log.d(TAG, "onReadyForSpeech");
                }
                public void onBeginningOfSpeech()
                {
                         Log.d(TAG, "onBeginningOfSpeech");
                }
                public void onRmsChanged(float rmsdB)
                {
                         Log.d(TAG, "onRmsChanged");
                }
                public void onBufferReceived(byte[] buffer)
                {
                         Log.d(TAG, "onBufferReceived");
                }
                public void onEndOfSpeech()
                {
                         Log.d(TAG, "onEndOfSpeech");
                }
                public void onError(int error)
                {
                    String mError=null;
                    Intent intent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
                    intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL,
                            RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
    
    
                     switch (error) {
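                     // Recoverable errors (network timeout, server, no match) restart
                     // listening with the rebuilt intent; ERROR_NETWORK returns early
                     // without logging; every other case just falls through to the log call below.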
                     case SpeechRecognizer.ERROR_NETWORK_TIMEOUT:                
                         mError = " network timeout"; 
                         sr.startListening(intent);
                         break;
                     case SpeechRecognizer.ERROR_NETWORK: 
                         mError = " network" ;
                         //toast("Please check data bundle or network settings");
                         return;
                     case SpeechRecognizer.ERROR_AUDIO: 
                         mError = " audio"; 
                         break;
                     case SpeechRecognizer.ERROR_SERVER: 
                         mError = " server";
                         sr.startListening(intent);
                         break;
                     case SpeechRecognizer.ERROR_CLIENT: 
                         mError = " client"; 
                         break;
                     case SpeechRecognizer.ERROR_SPEECH_TIMEOUT: 
                         mError = " speech time out" ; 
                         break;
                     case SpeechRecognizer.ERROR_NO_MATCH: 
                         mError = " no match" ;
                         sr.startListening(intent);
                         break;
                     case SpeechRecognizer.ERROR_RECOGNIZER_BUSY: 
                         mError = " recogniser busy" ; 
                         break;
                     case SpeechRecognizer.ERROR_INSUFFICIENT_PERMISSIONS: 
                         mError = " insufficient permissions" ;
                         break;
                     default:
                         mError = " unknown error code " + error;
                         break;
                     }
    
                    Log.d(TAG,  "error " +  mError);
    
                }
                public void onResults(Bundle results)                   
                {
                         // RESULTS_RECOGNITION is the list of candidate transcriptions from the recognizer.
                         String str = "";
                         Log.d(TAG, "onResults " + results);
                         ArrayList<String> data = results.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);
                         for (int i = 0; i < data.size(); i++)
                         {
                                   Log.d(TAG, "result " + data.get(i));
                                   str += data.get(i);
                         }
    
                }
                public void onPartialResults(Bundle partialResults)
                {
                         Log.d(TAG, "onPartialResults");
                }
                public void onEvent(int eventType, Bundle params)
                {
                         Log.d(TAG, "onEvent " + eventType);
                }
       }
    
    public void onSongPicked()
    {
        Intent intent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
        intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL,
                RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
        sr.startListening(intent);
    }
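
For question 2, here is a minimal sketch of the string-to-SRT conversion I have in mind, assuming each lyric line already has (or can somehow be given) a start and end time in milliseconds. The SrtWriter and LyricLine names and the timing fields are hypothetical placeholders, not something the code above produces:

    // Sketch: write timed lyric lines out in SubRip (SRT) format.
    import java.io.FileWriter;
    import java.io.IOException;
    import java.util.List;
    import java.util.Locale;

    public class SrtWriter {

        // Placeholder for one timed lyric line (the timings are assumed, not computed here).
        public static class LyricLine {
            public final long startMs;
            public final long endMs;
            public final String text;

            public LyricLine(long startMs, long endMs, String text) {
                this.startMs = startMs;
                this.endMs = endMs;
                this.text = text;
            }
        }

        // Formats milliseconds as the SRT timestamp "HH:MM:SS,mmm".
        static String srtTime(long ms) {
            long h = ms / 3600000;
            long m = (ms % 3600000) / 60000;
            long s = (ms % 60000) / 1000;
            return String.format(Locale.US, "%02d:%02d:%02d,%03d", h, m, s, ms % 1000);
        }

        // Writes the numbered SRT blocks: index, time range, text, blank line.
        public static void write(List<LyricLine> lines, String path) throws IOException {
            FileWriter out = new FileWriter(path);
            try {
                int index = 1;
                for (LyricLine line : lines) {
                    out.write(index++ + "\n");
                    out.write(srtTime(line.startMs) + " --> " + srtTime(line.endMs) + "\n");
                    out.write(line.text + "\n\n");
                }
            } finally {
                out.close();
            }
        }
    }

The hard part is obviously getting those timestamps in the first place, which is what question 1 is really about.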
    

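For the playback side (the android-mediaplayer part), my assumption is that once the lyrics carry timestamps, no speech recognition is needed while the song plays: polling MediaPlayer.getCurrentPosition() should be enough to pick the line to display. A rough sketch, reusing the hypothetical LyricLine above plus a hypothetical TextView named lyricsView:

    // Sketch: show whichever lyric line has started, by polling the player position.
    import android.media.MediaPlayer;
    import android.os.Handler;
    import android.os.Looper;
    import android.widget.TextView;

    import java.util.List;

    public class LyricSyncer {

        private final MediaPlayer player;                 // assumed already prepared and started
        private final List<SrtWriter.LyricLine> lines;    // timed lines, sorted by startMs
        private final TextView lyricsView;                // assumed view that shows the current line
        private final Handler handler = new Handler(Looper.getMainLooper());

        public LyricSyncer(MediaPlayer player, List<SrtWriter.LyricLine> lines, TextView lyricsView) {
            this.player = player;
            this.lines = lines;
            this.lyricsView = lyricsView;
        }

        private final Runnable tick = new Runnable() {
            @Override
            public void run() {
                if (player.isPlaying()) {
                    lyricsView.setText(currentLine(player.getCurrentPosition()));
                }
                handler.postDelayed(this, 200);   // keep polling a few times per second
            }
        };

        // Returns the last line whose start time is at or before the playback position.
        private String currentLine(int positionMs) {
            String current = "";
            for (SrtWriter.LyricLine line : lines) {
                if (line.startMs <= positionMs) {
                    current = line.text;
                } else {
                    break;
                }
            }
            return current;
        }

        public void start() { handler.post(tick); }

        public void stop()  { handler.removeCallbacks(tick); }
    }

The 200 ms polling interval is arbitrary; anything that refreshes a few times per second should look smooth enough for lyrics.
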
0 Answers:

No answers yet.