char * pointer does not increment inside a function

Asked: 2017-01-04 02:41:17

Tags: c pointers lexer

I know how pointers and pointer arithmetic work, but this one has me confused.

I wrote a lexer a while ago, and now I'm reworking it so I can use it from its parser, but for some reason it won't iterate over the char * I pass into the lexer function.

I malloc() a char buffer and pass it to the function that iterates over it. It does iterate, but for some reason the pointer resets back to its original address. I need the char * to keep advancing and stay at the position where it left off after each call, otherwise I can't build a proper parser...

main.c

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "lex.h"

int main(int argc, char **argv)
{
    struct lexer *token_queue = NULL;

    FILE *srcfile = fopen( argv[1], "r+" );
    if (!srcfile) {
        fprintf(stderr, "file \"%s\" not found, is null\n", argv[1]);
        lexer_destroy(&token_queue);
        goto hell;
    }
    fseek(srcfile, 0, SEEK_END);    // go all the way to end of file
    long file_len = ftell(srcfile); // get total size of the file
    rewind(srcfile);        // reset the file reader to beginning

    char *srcBuffer = malloc(sizeof(char) * (file_len+1));  // pretty sure it's safe to assume char is 1 byte...
    memset(srcBuffer, '\0', sizeof(char) * (file_len+1));   // zero the whole buffer

    fread(srcBuffer, sizeof(char), (file_len+1), srcfile);  // get entire file as a string
    char decision[20];

    do {
        printf("print a token?\n");
        scanf("%19s", decision);
        lexer_get_single_token(srcBuffer, &token_queue); // tokenize baby!

        //printf(" *srcBuffer == %c \n", *srcBuffer);
        print_tokens_colored(&token_queue);
    }
    while (decision[0] == 'y') ;    // slowly simulate parser asking for another token!

    free(srcBuffer); srcBuffer = NULL;
    fclose(srcfile); srcfile = NULL;

    lexer_destroy(&token_queue);
hell:;
    return 0;
}

The lexer function

void lexer_get_single_token(char *iter, struct lexer **Q)
{
    char wording[512] = "";     // buffer to hold identifiers, keywords, and strings
    unsigned int i = 0;

    const char *keywords[] = {
        "auto", "const", "double", "float", "int", "short", "struct", "unsigned",
        "break", "continue", "else", "for", "long", "signed", "switch", "void",
        "case", "default", "enum", "goto", "register", "sizeof", "typedef", "volatile",
        "char", "do", "extern", "if", "return", "static", "union", "while",
        "inline", "alignof", "_Generic", "bool", "_Bool", "true", "false"
    };
    printf("*iter == %c\n", *iter);
    while ( *iter != '\0' ) {
        while ( is_space(*iter) )
            ++iter;

        if (*iter == '/' && iter[1] == '*') {       // found C style /**/ comment
            do {
                ++iter;
            }
            while ( !(*iter == '*' && iter[1] == '/') );    // continuously skip until we find a */
            iter += 2;  // skip twice to pass over */ and go to the next token.
        }

        if (*iter == '/' && iter[1] == '/') {       // found C++ style // comment
            while ( *iter != '\n' )
                ++iter;     // skip until the next line which will be skipped itself.
        }

        if (*iter == '\\' && iter[1] == '\n') {     // formatting Left slash check
            lexer_add_token(Q, LeftSlash, "\\", 2);
            iter += 2;
            return;
        }

        if (*iter == '\"') {    // found string literal, adjust for "\\" so we won't crash
            wording[i++] = *iter++;     // add the first double quote to buffer
            while ( *iter != '\"' ) {
                if (*iter == '\\' && iter[1] == '\"' && iter[-1] != '\\') {
                    wording[i++] = *iter++;     // add the literal double quote as well
                }
                wording[i++] = *iter++;
            }
            wording[i++] = *iter++;     // found the ending double quote, add that too.

            if (wording[0] != '\0') {
                lexer_add_token(Q, StringConstant, wording, i+1);
                reset_string(wording);
                return;
            }
        }

        if ( *iter == '\'' ) {  // found character literal, adjust for '\\' so we won't crash
            wording[i++] = *iter++;
            int counter=0;
            while (*iter != '\'' && counter < 2) {      // Same operation as the string literal but limit as char
                if (*iter == '\\' && iter[1] == '\'' && iter[-1] != '\\') {
                    wording[i++] = *iter++;
                }
                wording[i++] = *iter++;
                ++counter;
            }
            wording[i++] = *iter++;     // add ending single quote to buffer

            if (wording[0] != '\0') {
                lexer_add_token(Q, CharConstant, wording, i+1);
                reset_string(wording);
                return;
            }
        }

        if (*iter == '0' && (iter[1] == 'x' || iter[1] == 'X')) {   // found hexadecimal constant
            wording[i++] = *iter++;     // copy both 0 and x to buffer
            wording[i++] = *iter++;

            while ( is_numeral(*iter) ) {
                wording[i++] = *iter++;     // copy numbers and letters A to F
            }
            if ( *iter == '.' && is_numeral(iter[1]) ) {    // found hexadecimal float
                wording[i++] = *iter++;
                while ( is_numeral(*iter) )
                    wording[i++] = *iter++;
                if (*iter == 'p' && is_numeral(iter[1])) {  // stuff like 0x0.3p10.
                    wording[i++] = *iter++;
                    while ( is_numeral(*iter) )
                        wording[i++] = *iter++;
                }
                if (wording[0] != '\0') {
                    lexer_add_token(Q, NumConstantHexFloat, wording, i+1);
                    reset_string(wording);
                    return;
                }
            }
            else {      // we didn't find a decimal, so tokenize what we found as a normal hex constant
                if (wording[0] != '\0') {
                    lexer_add_token(Q, NumConstantHex, wording, i+1);
                    reset_string(wording);
                    return;
                }
            }
        }

        while ( is_numeric(*iter) ) {   // found decimal constant
            wording[i++] = *iter++;
        }
        if ( *iter == '.' && is_numeric(iter[1]) ) {    // found floating point number
            wording[i++] = *iter++;     // add in the decimal char
            while ( is_numeric(*iter) )
                wording[i++] = *iter++;

            // add the 'e' constant for large floats as well as 'p' (power) constant
            if ( (*iter == 'p' || *iter == 'P' || *iter == 'e' || *iter == 'E') && is_numeric(iter[1]) )
            {
                wording[i++] = *iter++;
                while ( is_numeric(*iter) )
                    wording[i++] = *iter++;
            }
            if (*iter == 'f' || *iter == 'F')   // stuff like 2.0f, add that into the buffer!
                wording[i++] = *iter++;

            if (wording[0] != '\0') {
                lexer_add_token(Q, NumConstantReal, wording, i+1);
                reset_string(wording);
                return;
            }
        }
        else {      // no decimal, consider it a natural number
            if (wording[0] != '\0') {
                lexer_add_token(Q, NumConstant, wording, i+1);
                reset_string(wording);
                return;
            }
        }

        if (is_alphabetic(*iter)) { // found an identifier or potential keyword
            while (is_potential_identifier(*iter))
                wording[i++] = *iter++;

            if (wording[0] != '\0') {
                int x;
                int found_keyword = 0;
                for ( x=0 ; x<sizeof keywords/sizeof keywords[0] ; ++x ) {
                    if ( !strcmp(wording, keywords[x]) )
                        found_keyword = 1;
                }
                if (found_keyword)
                    lexer_add_token(Q, Keyword, wording, i+1);
                else lexer_add_token(Q, NumIdent, wording, i+1);
                reset_string(wording);
                return;
            }
        }

        switch ( *iter ) {  // add in individual characters
            case '=':
                if (iter[1] == '=') {
                    ++iter;
                    lexer_add_token(Q, EqualCmp, "==", 3);
                }
                else lexer_add_token(Q, Equal, "=", 2);
                ++iter;
                return;
            case ';':
                lexer_add_token(Q, Semicolon, ";", 2);
                ++iter;
                return;
            case ':':
                lexer_add_token(Q, Colon, ";", 2);
                ++iter;
                return;
            case '+':   // possible uses => left unary is positive, twice unary is increment, once binary is addition
                if (iter[1] == '=') {
                    ++iter;
                    lexer_add_token(Q, PlusEqual, "+=", 3);
                }
                else if (iter[1] == '+') {
                    ++iter;
                    lexer_add_token(Q, Increment, "++", 3);
                }
                else lexer_add_token(Q, Plus, "+", 2);
                ++iter;
                return;
            case '-':   // possible uses => left unary is negating, twice unary is decrement, one binary is minus
                if (iter[1] == '=') {
                    ++iter;
                    lexer_add_token(Q, MinusEqual, "-=", 3);
                }
                else if (iter[1] == '-') {
                    ++iter;
                    lexer_add_token(Q, Decrement, "--", 3);
                }
                else if (iter[1] == '>') {
                    ++iter;
                    lexer_add_token(Q, Arrow, "->", 3);
                }
                else lexer_add_token(Q, Dash, "-", 2);
                ++iter;
                return;
            case '*':   // leftward unary is dereferencing ptr, binary be mult. Also check for / as ending comment
                if (iter[1] == '=') {
                    ++iter;
                    lexer_add_token(Q, MultEqual, "*=", 3);
                }
                else lexer_add_token(Q, Asterisk, "*", 2);
                ++iter;
                return;
            case '/':   // check for * and / as comment EDIT: DONE
                if (iter[1] == '=') {
                    ++iter;
                    lexer_add_token(Q, DivEqual, "/=", 3);
                }
                else lexer_add_token(Q, DivSlash, "/", 2);
                ++iter;
                return;
            case '(':
                lexer_add_token(Q, LeftParens, "(", 2);
                ++iter;
                return;
            case ')':
                lexer_add_token(Q, RiteParens, ")", 2);
                ++iter;
                return;
            case '[':
                lexer_add_token(Q, LeftSqBracket, "[", 2);
                ++iter;
                return;
            case ']':
                lexer_add_token(Q, RightSqBracket, "]", 2);
                ++iter;
                return;
            case '{':
                lexer_add_token(Q, LeftCurlBrace, "{", 2);
                ++iter;
                return;
            case '}':
                lexer_add_token(Q, RightCurlBrace, "}", 2);
                ++iter;
                return;
            case '.':
                if (iter[1] == '.' && iter[2] == '.') {
                    iter += 2;
                    lexer_add_token(Q, Ellipses, "...", 4);
                }
                else lexer_add_token(Q, Dot, ".", 2);
                ++iter;
                return;
            case ',':
                lexer_add_token(Q, Comma, ",", 2);
                ++iter;
                return;
            case '<':
                if (iter[1] == '<') {
                    if (iter[2] == '=') {
                        lexer_add_token(Q, LeftBitShiftEqual, "<<=", 4);
                        iter += 2;
                    }
                    else {
                        lexer_add_token(Q, LeftBitShift, "<<", 3);
                        ++iter;
                    }
                }
                else if (iter[1] == '=') {
                    lexer_add_token(Q, LessEqual, "<=", 3);
                    ++iter;
                }
                else lexer_add_token(Q, LeftArrow, "<", 2);
                ++iter;
                return;
            case '>':
                if (iter[1] == '>') {
                    if (iter[2] == '=') {
                        lexer_add_token(Q, RightBitShiftEqual, ">>=", 4);
                        iter += 2;
                    }
                    else {
                        lexer_add_token(Q, RightBitShift, ">>", 3);
                        ++iter;
                    }
                }
                else if (iter[1] == '=') {
                    lexer_add_token(Q, GreaterEqual, ">=", 3);
                    ++iter;
                }
                else lexer_add_token(Q, RightArrow, ">", 2);
                ++iter;
                return;
            case '?':
                lexer_add_token(Q, QuestionMark, "?", 2);
                ++iter;
                return;
            case '#':
                lexer_add_token(Q, HashSym, "#", 2);
                ++iter;
                return;
            case '&':
                if (iter[1] == '=') {
                    ++iter;
                    lexer_add_token(Q, AndEqual, "&=", 3);
                }
                else if (iter[1] == '&') {
                    ++iter;
                    lexer_add_token(Q, BoolAnd, "&&", 3);
                }
                else lexer_add_token(Q, Ampersand, "&", 2);
                ++iter;
                return;
            case '^':
                if (iter[1] == '=') {
                    ++iter;
                    lexer_add_token(Q, XorEqual, "^=", 3);
                }
                else lexer_add_token(Q, Carot, "^", 2);
                ++iter;
                return;
            case '%':
                if (iter[1] == '=') {
                    ++iter;
                    lexer_add_token(Q, ModuloEqual, "%=", 3);
                }
                else lexer_add_token(Q, Percent, "%", 2);
                ++iter;
                return;
            case '!':
                if (iter[1] == '=') {
                    ++iter;
                    lexer_add_token(Q, NotEqual, "!=", 3);
                }
                else lexer_add_token(Q, ExclamationMark, "!", 2);
                ++iter;
                return;
            case '|':
                if (iter[1] == '=') {
                    ++iter;
                    lexer_add_token(Q, OrEqual, "|=", 3);
                }
                else if (iter[1] == '|') {
                    ++iter;
                    lexer_add_token(Q, BoolOr, "||", 3);
                }
                else lexer_add_token(Q, VerticalBar, "|", 2);
                ++iter;
                return;
            case '~':
                lexer_add_token(Q, Tilde, "~", 2);
                ++iter;
                return;
            case '@':
                lexer_add_token(Q, AtSign, "@", 2);
                ++iter;
                return;
            case '$':
                lexer_add_token(Q, DollarSign, "$", 2);
                ++iter;
                return;
            case '`':
                lexer_add_token(Q, GraveAccent, "`", 2);
                ++iter;
                return;
        }
        ++iter;
    }

}

1 Answer:

Answer 0 (score: 1)

C functions pass their arguments by value, so inside lexer_get_single_token(), iter is a copy of the pointer srcBuffer. That means any change to the value of iter is not reflected in srcBuffer. If you want to keep the changes made to iter inside the function, you can either return the pointer to the caller or add another level of indirection.
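For illustration, here is a minimal, self-contained sketch (hypothetical names) of the same effect: the function only moves its private copy of the pointer, and the caller's pointer is untouched.

#include <stdio.h>

/* advance() receives a copy of the caller's pointer;
   incrementing it moves only that local copy. */
static void advance(char *p)
{
    ++p;
    printf("inside advance(): %c\n", *p);      /* prints 'b' */
}

int main(void)
{
    char text[] = "ab";
    char *cursor = text;

    advance(cursor);
    printf("back in main():   %c\n", *cursor); /* still prints 'a' */
    return 0;
}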

Change the function signature to:

char * lexer_get_single_token(char *iter, struct lexer **Q);

The function now returns a pointer to char, and it can return iter when it finishes. The call would then look like this:

srcBuffer = lexer_get_single_token(srcBuffer, &token_queue);

After the call, srcBuffer points to wherever iter ended up when the function returned. You should keep a copy of the original srcBuffer pointer so that you can still free() it later.
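Put together, the loop in main.c could then look roughly like this (a sketch; srcOrig is a hypothetical name for the saved copy):

    char *srcOrig = srcBuffer;  /* remember the original address for free() */
    do {
        printf("print a token?\n");
        scanf("%19s", decision);
        srcBuffer = lexer_get_single_token(srcBuffer, &token_queue); /* advance past the token just read */
        print_tokens_colored(&token_queue);
    }
    while (decision[0] == 'y');

    free(srcOrig); srcBuffer = NULL;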

Alternatively, you can change the function signature to:

void lexer_get_single_token(char **iter, struct lexer **Q);

Now the call would look like this:

lexer_get_single_token(&srcBuffer, &token_queue);

The code inside the function has to be adjusted for the extra level of indirection, but since you are now passing a pointer to srcBuffer, the modifications the function makes are applied to srcBuffer itself.
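A sketch of how the function body could be adapted: keep a local cursor as before and write its final position back before returning. Note that every one of the existing return statements would need the same write-back (or you could dereference iterp directly throughout).

void lexer_get_single_token(char **iterp, struct lexer **Q)
{
    char *iter = *iterp;    /* local cursor, used exactly as before */

    /* ... existing tokenizing code, unchanged ... */

    *iterp = iter;          /* publish the final position to the caller */
}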