复制对指针或值的引用

时间:2017-02-14 20:07:49

标签: go

我想我理解 here 给出的答案,但为了以防万一,我想明确询问以下事项(如果您认为这是同一个问题,我表示道歉,但对我而言,这些问题感觉是不同的):

// f demonstrates two ways of producing a *int from a local value:
// option 1 repoints pD at the local d (copies the address);
// option 2 copies d's value into the int that pD already points to.
func f() *int {
  d := 6
  pD := new(int)
  pD = &d // option 1: pD now points at d; the int from new(int) is discarded
  *pD = d // option 2: copy d's value into the pointed-to int
  return pD
}

第一个选项只是把地址复制给指针,在性能方面应该更优(这只是合理的猜测,但似乎很明显)。我更喜欢这种方法/模式。

第二种选择是(浅)复制(?)。我认为使用这种方法,因为它进行了复制,我就不必担心 GC 会回收 'd' 的实例。由于我的不安全感(或者说作为初学者的无知),我经常使用这种方法。

我所担心的(或者说感到不安的)是:在第一种方法(传递地址)中,GC 是否会识别出 'd' 变量仍被指针引用,因此不会将其回收?也就是说,使用这种方法是否安全?即从 func f() 返回的指针 'pD',我能否在应用程序的整个生命周期内安全地传递使用?

参考:https://play.golang.org/p/JWNf5yRd_B

2 个答案:

答案 0 :(得分:10)

没有比官方文件更好的地方了:

#include "stdafx.h"
#include "TestRecording.h"
#include "libffmpeg.h"

// Nothing to set up: all recording state lives in the RecordingContext
// created locally inside Test().
TestRecording::TestRecording() = default;

// Nothing to release: this class owns no resources of its own.
TestRecording::~TestRecording() = default;

// Bundles everything the PortAudio capture callback needs to encode and mux
// audio: the output muxer, the single audio stream, and a reusable frame.
// Set up by InitializeRecordingContext() and torn down by
// FinalizeRecordingContext().
struct RecordingContext
{
    libffmpeg::AVFormatContext* formatContext = nullptr; // muxer for the output file
    libffmpeg::AVStream* audioStream = nullptr;          // the single AAC audio stream
    libffmpeg::AVFrame* audioFrame = nullptr;            // reusable frame for captured samples
    int audioFrameframeNumber = 0;                       // count of frames encoded so far
};

static int AudioRecordCallback(const void *inputBuffer, void *outputBuffer,
    unsigned long framesPerBuffer,
    const PaStreamCallbackTimeInfo* timeInfo,
    PaStreamCallbackFlags statusFlags,
    void *userData)
{
    RecordingContext* recordingContext = (RecordingContext*)userData;

    libffmpeg::avcodec_fill_audio_frame(recordingContext->audioFrame,
        recordingContext->audioFrame->channels,
        recordingContext->audioStream->codec->sample_fmt,
        static_cast<const unsigned char*>(inputBuffer),
        (framesPerBuffer * sizeof(float) * recordingContext->audioFrame->channels),
        0);

    libffmpeg::AVPacket pkt;
    libffmpeg::av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    int gotpacket;
    int result = avcodec_encode_audio2(recordingContext->audioStream->codec, &pkt, recordingContext->audioFrame, &gotpacket);

    if (result < 0)
    {
        LOGINT_WITH_MESSAGE(ERROR, result, "Couldn't encode the audio frame to acc");
        return paContinue;
    }

    if (gotpacket)
    {
        pkt.stream_index = recordingContext->audioStream->index;
        recordingContext->audioFrameframeNumber++;

        // this codec requires no bitstream filter, just send it to the muxer!
        result = libffmpeg::av_write_frame(recordingContext->formatContext, &pkt);
        if (result < 0)
        {
            LOG(ERROR) << "Couldn't write the encoded audio frame";
            libffmpeg::av_free_packet(&pkt);
            return paContinue;
        }

        libffmpeg::av_free_packet(&pkt);
    }

    return paContinue;
}

// Creates the MP4 output context, AAC stream, codec and reusable audio frame,
// opens the output file and writes the container headers.
// Returns true on success; on failure logs the reason and returns false.
static bool InitializeRecordingContext(RecordingContext* recordingContext)
{
    int result = libffmpeg::avformat_alloc_output_context2(&recordingContext->formatContext, NULL, NULL, "C:\\Users\\Paul\\Desktop\\test.mp4");
    if (result < 0)
    {
        LOGINT_WITH_MESSAGE(ERROR, result, "Couldn't create output format context");
        return false;
    }

    libffmpeg::AVCodec *audioCodec;
    audioCodec = libffmpeg::avcodec_find_encoder(libffmpeg::AV_CODEC_ID_AAC);
    if (audioCodec == NULL)
    {
        LOG(ERROR) << "Couldn't find the encoder for AAC";
        // BUG FIX: previously fell through with a NULL codec and crashed later.
        return false;
    }

    recordingContext->audioStream = libffmpeg::avformat_new_stream(recordingContext->formatContext, audioCodec);
    if (!recordingContext->audioStream)
    {
        LOG(ERROR) << "Couldn't create the audio stream";
        return false;
    }

    // 64 kbit/s stereo AAC at 48 kHz, planar float samples (what the AAC
    // encoder expects).
    recordingContext->audioStream->codec->bit_rate = 64000;
    recordingContext->audioStream->codec->sample_fmt = libffmpeg::AV_SAMPLE_FMT_FLTP;
    recordingContext->audioStream->codec->sample_rate = 48000;
    recordingContext->audioStream->codec->channel_layout = AV_CH_LAYOUT_STEREO;
    recordingContext->audioStream->codec->channels = libffmpeg::av_get_channel_layout_nb_channels(recordingContext->audioStream->codec->channel_layout);

    // Mirror the codec settings into codecpar so the muxer sees them too.
    recordingContext->audioStream->codecpar->bit_rate = recordingContext->audioStream->codec->bit_rate;
    recordingContext->audioStream->codecpar->format = recordingContext->audioStream->codec->sample_fmt;
    recordingContext->audioStream->codecpar->sample_rate = recordingContext->audioStream->codec->sample_rate;
    recordingContext->audioStream->codecpar->channel_layout = recordingContext->audioStream->codec->channel_layout;
    recordingContext->audioStream->codecpar->channels = recordingContext->audioStream->codec->channels;

    result = libffmpeg::avcodec_open2(recordingContext->audioStream->codec, audioCodec, NULL);
    if (result < 0)
    {
        LOGINT_WITH_MESSAGE(ERROR, result, "Couldn't open the audio codec");
        return false;
    }

    // create a new frame to store the audio samples
    recordingContext->audioFrame = libffmpeg::av_frame_alloc();
    if (!recordingContext->audioFrame)
    {
        LOG(ERROR) << "Couldn't allocate the output audio frame";
        return false;
    }

    // The frame's geometry must match the codec the samples are fed to.
    recordingContext->audioFrame->nb_samples = recordingContext->audioStream->codec->frame_size;
    recordingContext->audioFrame->channel_layout = recordingContext->audioStream->codec->channel_layout;
    recordingContext->audioFrame->channels = recordingContext->audioStream->codec->channels;
    recordingContext->audioFrame->format = recordingContext->audioStream->codec->sample_fmt;
    recordingContext->audioFrame->sample_rate = recordingContext->audioStream->codec->sample_rate;

    result = libffmpeg::av_frame_get_buffer(recordingContext->audioFrame, 0);
    if (result < 0)
    {
        LOG(ERROR) << "Couldn't initialize the output audio frame buffer";
        return false;
    }

    // some formats want video_stream headers to be separate  
    if (!strcmp(recordingContext->formatContext->oformat->name, "mp4") || !strcmp(recordingContext->formatContext->oformat->name, "mov") || !strcmp(recordingContext->formatContext->oformat->name, "3gp"))
    {
        recordingContext->audioStream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }

    // open the ouput file
    if (!(recordingContext->formatContext->oformat->flags & AVFMT_NOFILE))
    {
        result = libffmpeg::avio_open(&recordingContext->formatContext->pb, recordingContext->formatContext->filename, AVIO_FLAG_WRITE);
        if (result < 0)
        {
            LOGINT_WITH_MESSAGE(ERROR, result, "Couldn't open the output file");
            return false;
        }
    }

    // write the stream headers
    result = libffmpeg::avformat_write_header(recordingContext->formatContext, NULL);
    if (result < 0)
    {
        LOGINT_WITH_MESSAGE(ERROR, result, "Couldn't write the headers to the file");
        return false;
    }

    return true;
}

// Flushes and tears down the recording: writes the container trailer, closes
// every codec, closes the output file, and frees the format context and the
// reusable audio frame. Returns false (after logging) on the first failure;
// later teardown steps are then skipped. The teardown order matters: trailer
// before closing, avformat_free_context last among the ffmpeg context calls.
static bool FinalizeRecordingContext(RecordingContext* recordingContext)
{
    int result = 0;

    // write the trailing information (only if the output I/O context exists)
    if (recordingContext->formatContext->pb)
    {
        result = libffmpeg::av_write_trailer(recordingContext->formatContext);
        if (result < 0)
        {
            LOGINT_WITH_MESSAGE(ERROR, result, "Couldn't write the trailer information");
            return false;
        }
    }

    // close the codecs of all streams owned by the format context
    for (int i = 0; i < (int)recordingContext->formatContext->nb_streams; i++)
    {
        result = libffmpeg::avcodec_close(recordingContext->formatContext->streams[i]->codec);
        if (result < 0)
        {
            LOGINT_WITH_MESSAGE(ERROR, result, "Couldn't close the codec");
            return false;
        }
    }

    // close the output file (only when the format actually uses a file)
    if (recordingContext->formatContext->pb)
    {
        if (!(recordingContext->formatContext->oformat->flags & AVFMT_NOFILE))
        {
            result = libffmpeg::avio_close(recordingContext->formatContext->pb);
            if (result < 0)
            {
                LOGINT_WITH_MESSAGE(ERROR, result, "Couldn't close the output file");
                return false;
            }
        }
    }

    // free the format context and all of its data (streams included)
    libffmpeg::avformat_free_context(recordingContext->formatContext);

    // null the dangling pointers so a repeated call is harmless
    recordingContext->formatContext = NULL;
    recordingContext->audioStream = NULL;

    if (recordingContext->audioFrame)
    {
        libffmpeg::av_frame_free(&recordingContext->audioFrame);
        recordingContext->audioFrame = NULL;
    }

    return true;
}

int TestRecording::Test()
{
    PaError result = paNoError;

    result = Pa_Initialize();
    if (result != paNoError) LOGINT_WITH_MESSAGE(ERROR, result, "Error initializing audio device framework");

    RecordingContext recordingContext;
    if (!InitializeRecordingContext(&recordingContext))
    {
        LOG(ERROR) << "Couldn't start recording file";
        return 0;
    }

    auto defaultDevice = Pa_GetDefaultInputDevice();
    auto deviceInfo = Pa_GetDeviceInfo(defaultDevice);

    PaStreamParameters  inputParameters;
    inputParameters.device = defaultDevice;
    inputParameters.channelCount = 2;
    inputParameters.sampleFormat = paFloat32;
    inputParameters.suggestedLatency = deviceInfo->defaultLowInputLatency;
    inputParameters.hostApiSpecificStreamInfo = NULL;

    PaStream* stream = NULL;
    result = Pa_OpenStream(
        &stream,
        &inputParameters,
        NULL,
        48000,
        1024,
        paClipOff,
        AudioRecordCallback,
        &recordingContext);
    if (result != paNoError)LOGINT_WITH_MESSAGE(ERROR, result, "Couldn't open the audio stream");

    result = Pa_StartStream(stream);
    if (result != paNoError)LOGINT_WITH_MESSAGE(ERROR, result, "Couldn't start the audio stream");

    Sleep(1000 * 5);

    result = Pa_StopStream(stream);
    if (result != paNoError)LOGINT_WITH_MESSAGE(ERROR, result, "Couldn't stop the audio stream");

    if (!FinalizeRecordingContext(&recordingContext)) LOG(ERROR) << "Couldn't stop recording file";

    result = Pa_CloseStream(stream);
    if (result != paNoError)LOGINT_WITH_MESSAGE(ERROR, result, "Couldn't stop the audio stream");

    return 0;
}
     

请注意,与 C 不同,返回局部变量的地址是完全没有问题的;函数返回后,与该变量关联的存储仍然存在。实际上,每次对复合字面量(composite literal)取地址时,都会分配一个新实例,因此我们可以把最后两行合并。

(来源:"Effective Go")

因此第一个选项(返回指向局部变量的指针)绝对安全,甚至鼓励。通过执行escape analysis,编译器可以告诉变量转义其本地作用域并将其分配给堆。

答案 1 :(得分:3)

简而言之:否。

第一:Go 中没有"引用"。现在就忘掉这个概念,否则你会让自己吃亏。真的。用"引用"来思考是完全错误的。

第二:两者性能完全相同。暂时忘掉这种纳米级的优化,尤其是在处理 int 时。当且仅当您遇到性能问题时:先测量,再优化。"移动一个 8 字节的小指针肯定比复制 30 甚至 100 字节的结构体快得多"这种想法并不正确,至少没有那么简单。

第三:只需将其写为func f() *int { d := 6; return &d; }即可。这里没有必要做任何花哨的舞蹈。

第四:选项 2 对 int 做的是一个"深拷贝"。但这个说法可能会产生误导,因为对 int 来说不存在"浅拷贝",所以我不确定我是否明白你在这里问的是什么。Go 没有深拷贝和浅拷贝的概念。如果复制指针值,复制的就是指针值。还记得第一点吗?Go 中没有引用。指针值是一个值,复制它,你就得到了该指针值的一个副本。这样的副本对所指向的值没有任何影响,特别是不会把它也复制一份。这也说明 Go 中的副本并不是"深"的。在谈论 Go 时,忘掉深/浅拷贝吧。(当然,您可以为自定义对象实现执行"深拷贝"的函数。)

第五:Go 有一个正常工作的垃圾收集器。你怎么做完全没有区别:只要一个对象还存活,它就不会被回收;一旦它不再被引用,就会被回收。您可以传递、返回、复制、移交、取地址、解引用指针,或做任何您喜欢的事情,这都无关紧要,GC 都能正常工作。(除非你故意使用 unsafe 包来自找麻烦和错误。)