[anjuta] symbol-db: ctags updated to svn rev. 749
From: Massimo Cora' <mcora src gnome org>
To: svn-commits-list gnome org
Subject: [anjuta] symbol-db: ctags updated to svn rev. 749
Date: Wed, 23 Dec 2009 23:25:41 +0000 (UTC)
commit 29d2674150385862e846c608d369ae9fef51a678
Author: Massimo Corà <mcora src gnome org>
Date: Wed Dec 23 23:16:48 2009 +0100
symbol-db: ctags updated to svn rev. 749
plugins/symbol-db/anjuta-tags/ant.c | 4 +-
plugins/symbol-db/anjuta-tags/eiffel.c | 20 ++-
plugins/symbol-db/anjuta-tags/lregex.c | 4 +-
plugins/symbol-db/anjuta-tags/php.c | 6 +-
plugins/symbol-db/anjuta-tags/python.c | 4 +-
plugins/symbol-db/anjuta-tags/sort.c | 4 +-
plugins/symbol-db/anjuta-tags/sql.c | 274 ++++++++++++++++++++++++++++----
plugins/symbol-db/anjuta-tags/tex.c | 35 ++---
8 files changed, 282 insertions(+), 69 deletions(-)
---
diff --git a/plugins/symbol-db/anjuta-tags/ant.c b/plugins/symbol-db/anjuta-tags/ant.c
index eedfcec..bd01de4 100644
--- a/plugins/symbol-db/anjuta-tags/ant.c
+++ b/plugins/symbol-db/anjuta-tags/ant.c
@@ -24,9 +24,9 @@
static void installAntRegex (const langType language)
{
addTagRegex (language,
- "^[ \t]*<[ \t]*project.*name=\"([^\"]+)\".*", "\\1", "p,project,projects", NULL);
+ "^[ \t]*<[ \t]*project[^>]+name=\"([^\"]+)\".*", "\\1", "p,project,projects", NULL);
addTagRegex (language,
- "^[ \t]*<[ \t]*target.*name=\"([^\"]+)\".*", "\\1", "t,target,targets", NULL);
+ "^[ \t]*<[ \t]*target[^>]+name=\"([^\"]+)\".*", "\\1", "t,target,targets", NULL);
}
extern parserDefinition* AntParser ()
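
The point of the ant.c change: with the old greedy ".*", the name capture could
run past the tag's closing '>' and pick up a name= attribute from a later
element on the same line; "[^>]+" confines the search to the <project>/<target>
tag itself. A minimal standalone sketch of the difference (the helper and the
test line are hypothetical, not part of the commit):

    #include <regex.h>
    #include <stdio.h>

    /* Compile a pattern, run it on a line, and print capture group 1. */
    static void show (const char *pattern, const char *line)
    {
        regex_t re;
        regmatch_t m[2];
        if (regcomp (&re, pattern, REG_EXTENDED) != 0)
            return;
        if (regexec (&re, line, 2, m, 0) == 0)
            printf ("captured: %.*s\n",
                    (int) (m[1].rm_eo - m[1].rm_so), line + m[1].rm_so);
        else
            printf ("no match\n");
        regfree (&re);
    }

    int main (void)
    {
        /* one line, two elements: only the second has a name attribute */
        const char *line = "<target depends=\"init\"><property name=\"oops\"/>";

        show ("^[ \t]*<[ \t]*target.*name=\"([^\"]+)\".*", line);    /* old: captured: oops */
        show ("^[ \t]*<[ \t]*target[^>]+name=\"([^\"]+)\".*", line); /* new: no match */
        return 0;
    }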
diff --git a/plugins/symbol-db/anjuta-tags/eiffel.c b/plugins/symbol-db/anjuta-tags/eiffel.c
index b504ac3..e2f5a5c 100644
--- a/plugins/symbol-db/anjuta-tags/eiffel.c
+++ b/plugins/symbol-db/anjuta-tags/eiffel.c
@@ -1,5 +1,5 @@
/*
-* $Id: eiffel.c 706 2009-06-28 23:09:30Z dhiebert $
+* $Id: eiffel.c 748 2009-11-06 02:44:42Z dhiebert $
*
* Copyright (c) 1998-2002, Darren Hiebert
*
@@ -53,13 +53,15 @@ typedef enum eException { ExceptionNone, ExceptionEOF } exception_t;
*/
typedef enum eKeywordId {
KEYWORD_NONE = -1,
- KEYWORD_alias, KEYWORD_all, KEYWORD_and, KEYWORD_as, KEYWORD_assign,
+ KEYWORD_alias, KEYWORD_all, KEYWORD_and,
+ KEYWORD_as, KEYWORD_assign, KEYWORD_attached,
KEYWORD_check, KEYWORD_class, KEYWORD_convert, KEYWORD_create,
KEYWORD_creation, KEYWORD_Current,
- KEYWORD_debug, KEYWORD_deferred, KEYWORD_do, KEYWORD_else,
- KEYWORD_elseif, KEYWORD_end, KEYWORD_ensure, KEYWORD_expanded,
- KEYWORD_export, KEYWORD_external, KEYWORD_false, KEYWORD_feature,
- KEYWORD_from, KEYWORD_frozen, KEYWORD_if, KEYWORD_implies,
+ KEYWORD_debug, KEYWORD_deferred, KEYWORD_detachable, KEYWORD_do,
+ KEYWORD_else, KEYWORD_elseif, KEYWORD_end, KEYWORD_ensure,
+ KEYWORD_expanded, KEYWORD_export, KEYWORD_external,
+ KEYWORD_false, KEYWORD_feature, KEYWORD_from, KEYWORD_frozen,
+ KEYWORD_if, KEYWORD_implies,
KEYWORD_indexing, KEYWORD_infix, KEYWORD_inherit, KEYWORD_inspect,
KEYWORD_invariant, KEYWORD_is, KEYWORD_like, KEYWORD_local,
KEYWORD_loop, KEYWORD_not, KEYWORD_obsolete, KEYWORD_old, KEYWORD_once,
@@ -154,6 +156,7 @@ static const keywordDesc EiffelKeywordTable [] = {
{ "and", KEYWORD_and },
{ "as", KEYWORD_as },
{ "assign", KEYWORD_assign },
+ { "attached", KEYWORD_attached },
{ "check", KEYWORD_check },
{ "class", KEYWORD_class },
{ "convert", KEYWORD_convert },
@@ -162,6 +165,7 @@ static const keywordDesc EiffelKeywordTable [] = {
{ "current", KEYWORD_Current },
{ "debug", KEYWORD_debug },
{ "deferred", KEYWORD_deferred },
+ { "detachable", KEYWORD_detachable },
{ "do", KEYWORD_do },
{ "else", KEYWORD_else },
{ "elseif", KEYWORD_elseif },
@@ -870,7 +874,9 @@ static boolean parseType (tokenInfo *const token)
}
else
{
- if (isKeyword (id, KEYWORD_expanded))
+ if (isKeyword (id, KEYWORD_attached) ||
+ isKeyword (id, KEYWORD_detachable) ||
+ isKeyword (id, KEYWORD_expanded))
{
copyToken (id, token);
readToken (token);
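
The eiffel.c hunks track Eiffel's void-safety syntax: attached and detachable
join the keyword enum and table (the surrounding KEYWORD_* lines are only
re-wrapped), and parseType now skips either mark, as it already did for
expanded, before reading the type name proper. A hypothetical fragment the
updated parser accepts:

    class PERSON feature
        name: attached STRING
        spouse: detachable PERSON
    end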
diff --git a/plugins/symbol-db/anjuta-tags/lregex.c b/plugins/symbol-db/anjuta-tags/lregex.c
index 59f5df6..37d7ea0 100644
--- a/plugins/symbol-db/anjuta-tags/lregex.c
+++ b/plugins/symbol-db/anjuta-tags/lregex.c
@@ -1,5 +1,5 @@
/*
-* $Id: lregex.c 576 2007-06-30 04:16:23Z elliotth $
+* $Id: lregex.c 747 2009-11-06 02:33:37Z dhiebert $
*
* Copyright (c) 2000-2003, Darren Hiebert
*
@@ -408,7 +408,7 @@ static void processLanguageRegex (const langType language,
const char* regexfile = parameter + 1;
FILE* const fp = fopen (regexfile, "r");
if (fp == NULL)
- error (WARNING | PERROR, regexfile);
+ error (WARNING | PERROR, "%s", regexfile);
else
{
vString* const regex = vStringNew ();
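
This lregex.c hunk and the matching sort.c hunk below are the same hardening:
the old calls passed a filename or message directly as error()'s format string,
so any '%' in it would be parsed as a conversion specifier; routing the value
through a literal "%s" prints it verbatim. A minimal sketch of the bug class
using plain printf (the filename is hypothetical):

    #include <stdio.h>

    int main (void)
    {
        /* hypothetical filename containing conversion specifiers */
        const char *regexfile = "rules-%s-%n.conf";

        printf ("%s\n", regexfile);   /* safe: the name is printed verbatim */

        /*
         * printf (regexfile);
         *
         * Unsafe: "%s" would read a missing argument and "%n" would write
         * through one -- undefined behaviour.  This is the pattern the two
         * error() calls used before this commit.
         */
        return 0;
    }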
diff --git a/plugins/symbol-db/anjuta-tags/php.c b/plugins/symbol-db/anjuta-tags/php.c
index f7b91f8..90e096a 100644
--- a/plugins/symbol-db/anjuta-tags/php.c
+++ b/plugins/symbol-db/anjuta-tags/php.c
@@ -1,5 +1,5 @@
/*
-* $Id: php.c 729 2009-07-10 23:34:21Z jafl $
+* $Id: php.c 734 2009-08-20 23:33:54Z jafl $
*
* Copyright (c) 2000, Jesus Castagnetto <jmcastagnetto zkey com>
*
@@ -64,8 +64,8 @@ static kindOption PhpKinds [] = {
static void installPHPRegex (const langType language)
{
- addTagRegex(language, "^[ \t]*(abstract[ \t]+)?class[ \t]+([" ALPHA "_][" ALNUM "_]*)",
- "\\2", "c,class,classes", NULL);
+ addTagRegex(language, "^[ \t]*((final|abstract)[ \t]+)*class[ \t]+([" ALPHA "_][" ALNUM "_]*)",
+ "\\3", "c,class,classes", NULL);
addTagRegex(language, "^[ \t]*interface[ \t]+([" ALPHA "_][" ALNUM "_]*)",
"\\1", "i,interface,interfaces", NULL);
addTagRegex(language, "^[ \t]*define[ \t]*\\([ \t]*['\"]?([" ALPHA "_][" ALNUM "_]*)",
diff --git a/plugins/symbol-db/anjuta-tags/python.c b/plugins/symbol-db/anjuta-tags/python.c
index 5fdf31b..13367d6 100644
--- a/plugins/symbol-db/anjuta-tags/python.c
+++ b/plugins/symbol-db/anjuta-tags/python.c
@@ -1,5 +1,5 @@
/*
-* $Id: python.c 720 2009-07-07 03:55:23Z dhiebert $
+* $Id: python.c 738 2009-08-26 13:18:03Z elias $
*
* Copyright (c) 2000-2003, Darren Hiebert
*
@@ -240,7 +240,7 @@ static const char *skipEverything (const char *cp)
{
for (; *cp; cp++)
{
- if (*cp == '"' || *cp == '\'')
+ if (*cp == '"' || *cp == '\'' || *cp == '#')
{
cp = skipString(cp);
if (!*cp) break;
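
The python.c change makes skipEverything() treat '#' like a quote character:
skipString() then consumes the rest of the comment (there is no closing '#',
so it stops at end of line), and identifier-like text inside a comment is no
longer scanned. Hypothetical input illustrating the intent:

    result = compute()  # old = cache -- 'old' is now skipped over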
diff --git a/plugins/symbol-db/anjuta-tags/sort.c b/plugins/symbol-db/anjuta-tags/sort.c
index 09ba87a..c58defc 100644
--- a/plugins/symbol-db/anjuta-tags/sort.c
+++ b/plugins/symbol-db/anjuta-tags/sort.c
@@ -1,5 +1,5 @@
/*
-* $Id: sort.c 498 2007-02-17 22:43:15Z dhiebert $
+* $Id: sort.c 747 2009-11-06 02:33:37Z dhiebert $
*
* Copyright (c) 1996-2002, Darren Hiebert
*
@@ -109,7 +109,7 @@ static void failedSort (FILE *const fp, const char* msg)
if (fp != NULL)
fclose (fp);
if (msg == NULL)
- error (FATAL | PERROR, cannotSort);
+ error (FATAL | PERROR, "%s", cannotSort);
else
error (FATAL, "%s: %s", msg, cannotSort);
}
diff --git a/plugins/symbol-db/anjuta-tags/sql.c b/plugins/symbol-db/anjuta-tags/sql.c
index efe7e5d..e01c5d4 100644
--- a/plugins/symbol-db/anjuta-tags/sql.c
+++ b/plugins/symbol-db/anjuta-tags/sql.c
@@ -1,5 +1,5 @@
/*
- * $Id: sql.c 703 2009-03-14 22:06:12Z dfishburn $
+ * $Id: sql.c 745 2009-10-27 02:42:55Z dfishburn $
*
* Copyright (c) 2002-2003, Darren Hiebert
*
@@ -65,9 +65,14 @@ typedef enum eKeywordId {
KEYWORD_end,
KEYWORD_function,
KEYWORD_if,
+ KEYWORD_else,
+ KEYWORD_elseif,
+ KEYWORD_endif,
KEYWORD_loop,
+ KEYWORD_while,
KEYWORD_case,
KEYWORD_for,
+ KEYWORD_do,
KEYWORD_call,
KEYWORD_package,
KEYWORD_pragma,
@@ -114,6 +119,7 @@ typedef enum eKeywordId {
KEYWORD_ml_conn_dnet,
KEYWORD_ml_conn_java,
KEYWORD_ml_conn_chk,
+ KEYWORD_ml_prop,
KEYWORD_local,
KEYWORD_temporary,
KEYWORD_drop,
@@ -140,6 +146,7 @@ typedef enum eTokenType {
TOKEN_BLOCK_LABEL_END,
TOKEN_CHARACTER,
TOKEN_CLOSE_PAREN,
+ TOKEN_COLON,
TOKEN_SEMICOLON,
TOKEN_COMMA,
TOKEN_IDENTIFIER,
@@ -154,7 +161,8 @@ typedef enum eTokenType {
TOKEN_OPEN_SQUARE,
TOKEN_CLOSE_SQUARE,
TOKEN_TILDE,
- TOKEN_FORWARD_SLASH
+ TOKEN_FORWARD_SLASH,
+ TOKEN_EQUAL
} tokenType;
typedef struct sTokenInfoSQL {
@@ -198,6 +206,7 @@ typedef enum {
SQLTAG_SYNONYM,
SQLTAG_MLTABLE,
SQLTAG_MLCONN,
+ SQLTAG_MLPROP,
SQLTAG_COUNT
} sqlKind;
@@ -223,7 +232,8 @@ static kindOption SqlKinds [] = {
{ TRUE, 'V', "view", "views" },
{ TRUE, 'n', "synonym", "synonyms" },
{ TRUE, 'x', "mltable", "MobiLink Table Scripts" },
- { TRUE, 'y', "mlconn", "MobiLink Conn Scripts" }
+ { TRUE, 'y', "mlconn", "MobiLink Conn Scripts" },
+ { TRUE, 'z', "mlprop", "MobiLink Properties " }
};
static const keywordDesc SqlKeywordTable [] = {
@@ -237,9 +247,14 @@ static const keywordDesc SqlKeywordTable [] = {
{ "end", KEYWORD_end },
{ "function", KEYWORD_function },
{ "if", KEYWORD_if },
+ { "else", KEYWORD_else },
+ { "elseif", KEYWORD_elseif },
+ { "endif", KEYWORD_endif },
{ "loop", KEYWORD_loop },
+ { "while", KEYWORD_while },
{ "case", KEYWORD_case },
{ "for", KEYWORD_for },
+ { "do", KEYWORD_do },
{ "call", KEYWORD_call },
{ "package", KEYWORD_package },
{ "pragma", KEYWORD_pragma },
@@ -286,6 +301,7 @@ static const keywordDesc SqlKeywordTable [] = {
{ "ml_add_dnet_connection_script", KEYWORD_ml_conn_dnet },
{ "ml_add_java_connection_script", KEYWORD_ml_conn_java },
{ "ml_add_lang_conn_script_chk", KEYWORD_ml_conn_chk },
+ { "ml_add_property", KEYWORD_ml_prop },
{ "local", KEYWORD_local },
{ "temporary", KEYWORD_temporary },
{ "drop", KEYWORD_drop },
@@ -303,6 +319,7 @@ static const keywordDesc SqlKeywordTable [] = {
/* Recursive calls */
static void parseBlock (tokenInfo *const token, const boolean local);
+static void parseDeclare (tokenInfo *const token, const boolean local);
static void parseKeywords (tokenInfo *const token);
static void parseSqlFile (tokenInfo *const token);
@@ -541,6 +558,7 @@ getNextChar:
case EOF: longjmp (Exception, (int)ExceptionEOF); break;
case '(': token->type = TOKEN_OPEN_PAREN; break;
case ')': token->type = TOKEN_CLOSE_PAREN; break;
+ case ':': token->type = TOKEN_COLON; break;
case ';': token->type = TOKEN_SEMICOLON; break;
case '.': token->type = TOKEN_PERIOD; break;
case ',': token->type = TOKEN_COMMA; break;
@@ -549,6 +567,7 @@ getNextChar:
case '~': token->type = TOKEN_TILDE; break;
case '[': token->type = TOKEN_OPEN_SQUARE; break;
case ']': token->type = TOKEN_CLOSE_SQUARE; break;
+ case '=': token->type = TOKEN_EQUAL; break;
case '\'':
case '"':
@@ -870,6 +889,7 @@ static void parseSubProgram (tokenInfo *const token)
isKeyword (token, KEYWORD_internal) ||
isKeyword (token, KEYWORD_external) ||
isKeyword (token, KEYWORD_url) ||
+ isType (token, TOKEN_EQUAL) ||
isCmdTerm (token)
)
)
@@ -900,6 +920,12 @@ static void parseSubProgram (tokenInfo *const token)
vStringClear (token->scope);
}
+ if ( isType (token, TOKEN_EQUAL) )
+ readToken (token);
+
+ if ( isKeyword (token, KEYWORD_declare) )
+ parseDeclare (token, FALSE);
+
if (isKeyword (token, KEYWORD_is) ||
isKeyword (token, KEYWORD_begin) )
{
@@ -1066,18 +1092,18 @@ static void parseDeclare (tokenInfo *const token, const boolean local)
case KEYWORD_type: parseType (token); break;
default:
- if (isType (token, TOKEN_IDENTIFIER))
- {
- if (local)
- {
- makeSqlTag (token, SQLTAG_LOCAL_VARIABLE);
- }
- else
- {
- makeSqlTag (token, SQLTAG_VARIABLE);
- }
- }
- break;
+ if (isType (token, TOKEN_IDENTIFIER))
+ {
+ if (local)
+ {
+ makeSqlTag (token, SQLTAG_LOCAL_VARIABLE);
+ }
+ else
+ {
+ makeSqlTag (token, SQLTAG_VARIABLE);
+ }
+ }
+ break;
}
findToken (token, TOKEN_SEMICOLON);
readToken (token);
@@ -1164,12 +1190,13 @@ static void parseLabel (tokenInfo *const token)
}
}
-static void parseStatements (tokenInfo *const token)
+static void parseStatements (tokenInfo *const token, const boolean exit_on_endif )
{
boolean isAnsi = TRUE;
boolean stmtTerm = FALSE;
do
{
+
if (isType (token, TOKEN_BLOCK_LABEL_BEGIN))
parseLabel (token);
else
@@ -1210,6 +1237,7 @@ static void parseStatements (tokenInfo *const token)
*/
while (! isKeyword (token, KEYWORD_then))
readToken (token);
+
readToken (token);
continue;
@@ -1220,6 +1248,15 @@ static void parseStatements (tokenInfo *const token)
* IF...THEN
* END IF;
*
+ * IF...THEN
+ * ELSE
+ * END IF;
+ *
+ * IF...THEN
+ * ELSEIF...THEN
+ * ELSE
+ * END IF;
+ *
* or non-ANSI
* IF ...
* BEGIN
@@ -1248,7 +1285,22 @@ static void parseStatements (tokenInfo *const token)
else
{
readToken (token);
- parseStatements (token);
+
+ while( ! (isKeyword (token, KEYWORD_end ) ||
+ isKeyword (token, KEYWORD_endif ) )
+ )
+ {
+ if ( isKeyword (token, KEYWORD_else) ||
+ isKeyword (token, KEYWORD_elseif) )
+ readToken (token);
+
+ parseStatements (token, TRUE);
+
+ if ( isCmdTerm(token) )
+ readToken (token);
+
+ }
+
/*
* parseStatements returns when it finds an END, an IF
* should follow the END for ANSI anyway.
@@ -1258,7 +1310,13 @@ static void parseStatements (tokenInfo *const token)
if( isKeyword (token, KEYWORD_end ) )
readToken (token);
- if( ! isKeyword (token, KEYWORD_if ) )
+ if( isKeyword (token, KEYWORD_if ) || isKeyword (token, KEYWORD_endif ) )
+ {
+ readToken (token);
+ if ( isCmdTerm(token) )
+ stmtTerm = TRUE;
+ }
+ else
{
/*
* Well we need to do something here.
@@ -1284,14 +1342,64 @@ static void parseStatements (tokenInfo *const token)
* END CASE;
*
* FOR loop_name AS cursor_name CURSOR FOR ...
+ * DO
* END FOR;
*/
+ if( isKeyword (token, KEYWORD_for ) )
+ {
+ /* loop name */
+ readToken (token);
+ /* AS */
+ readToken (token);
+
+ while ( ! isKeyword (token, KEYWORD_is) )
+ {
+ /*
+ * If this is not an AS keyword this is
+ * not a proper FOR statement and should
+ * simply be ignored
+ */
+ return;
+ }
+
+ while ( ! isKeyword (token, KEYWORD_do) )
+ readToken (token);
+ }
+
+
readToken (token);
- parseStatements (token);
+ while( ! isKeyword (token, KEYWORD_end ) )
+ {
+ /*
+ if ( isKeyword (token, KEYWORD_else) ||
+ isKeyword (token, KEYWORD_elseif) )
+ readToken (token);
+ */
+
+ parseStatements (token, FALSE);
+
+ if ( isCmdTerm(token) )
+ readToken (token);
+ }
+
if( isKeyword (token, KEYWORD_end ) )
readToken (token);
+ /*
+ * Typically ended with
+ * END LOOP [loop name];
+ * END CASE
+ * END FOR [loop name];
+ */
+ if ( isKeyword (token, KEYWORD_loop) ||
+ isKeyword (token, KEYWORD_case) ||
+ isKeyword (token, KEYWORD_for) )
+ readToken (token);
+
+ if ( isCmdTerm(token) )
+ stmtTerm = TRUE;
+
break;
case KEYWORD_create:
@@ -1324,11 +1432,36 @@ static void parseStatements (tokenInfo *const token)
*
* So we must read to the first semi-colon or an END block
*/
- while ( ! stmtTerm &&
- ! ( isKeyword (token, KEYWORD_end) ||
- (isCmdTerm(token)) )
+ while ( ! stmtTerm &&
+ ! ( isKeyword (token, KEYWORD_end) ||
+ (isCmdTerm(token)) )
)
{
+ if ( isKeyword (token, KEYWORD_endif) &&
+ exit_on_endif )
+ return;
+
+ if (isType (token, TOKEN_COLON) )
+ {
+ /*
+ * A : can signal a loop name
+ * myloop:
+ * LOOP
+ * LEAVE myloop;
+ * END LOOP;
+ * Unfortunately, labels do not have a
+ * cmd terminator, therefore we have to check
+ * if the next token is a keyword and process
+ * it accordingly.
+ */
+ readToken (token);
+ if ( isKeyword (token, KEYWORD_loop) ||
+ isKeyword (token, KEYWORD_while) ||
+ isKeyword (token, KEYWORD_for) )
+ /* parseStatements (token); */
+ return;
+ }
+
readToken (token);
if (isType (token, TOKEN_OPEN_PAREN) ||
@@ -1336,6 +1469,20 @@ static void parseStatements (tokenInfo *const token)
isType (token, TOKEN_OPEN_SQUARE) )
skipToMatched (token);
+ /*
+ * Since we know how to parse various statements
+ * if we detect them, parse them to completion
+ */
+ if (isType (token, TOKEN_BLOCK_LABEL_BEGIN) ||
+ isKeyword (token, KEYWORD_exception) ||
+ isKeyword (token, KEYWORD_loop) ||
+ isKeyword (token, KEYWORD_case) ||
+ isKeyword (token, KEYWORD_for) ||
+ isKeyword (token, KEYWORD_begin) )
+ parseStatements (token, FALSE);
+ else if (isKeyword (token, KEYWORD_if))
+ parseStatements (token, TRUE);
+
}
}
/*
@@ -1343,11 +1490,12 @@ static void parseStatements (tokenInfo *const token)
* See comment above, now, only read if the current token
* is not a command terminator.
*/
- if ( isCmdTerm(token) )
- {
- readToken (token);
- }
- } while (! isKeyword (token, KEYWORD_end) && ! stmtTerm );
+ if ( isCmdTerm(token) && ! stmtTerm )
+ stmtTerm = TRUE;
+
+ } while (! isKeyword (token, KEYWORD_end) &&
+ ! (exit_on_endif && isKeyword (token, KEYWORD_endif) ) &&
+ ! stmtTerm );
}
static void parseBlock (tokenInfo *const token, const boolean local)
@@ -1378,7 +1526,10 @@ static void parseBlock (tokenInfo *const token, const boolean local)
token->begin_end_nest_lvl++;
while (! isKeyword (token, KEYWORD_end))
{
- parseStatements (token);
+ parseStatements (token, FALSE);
+
+ if ( isCmdTerm(token) )
+ readToken (token);
}
token->begin_end_nest_lvl--;
@@ -1994,6 +2145,70 @@ static void parseMLConn (tokenInfo *const token)
deleteToken (event);
}
+static void parseMLProp (tokenInfo *const token)
+{
+ tokenInfo *const component = newToken ();
+ tokenInfo *const prop_set_name = newToken ();
+ tokenInfo *const prop_name = newToken ();
+
+ /*
+ * This deals with these formats
+ * ml_add_property (
+ * 'comp_name',
+ * 'prop_set_name',
+ * 'prop_name',
+ * 'prop_value'
+ * )
+ */
+
+ readToken (token);
+ if ( isType (token, TOKEN_OPEN_PAREN) )
+ {
+ readToken (component);
+ readToken (token);
+ while (!(isType (token, TOKEN_COMMA) ||
+ isType (token, TOKEN_CLOSE_PAREN)
+ ))
+ {
+ readToken (token);
+ }
+
+ if (isType (token, TOKEN_COMMA))
+ {
+ readToken (prop_set_name);
+ readToken (token);
+ while (!(isType (token, TOKEN_COMMA) ||
+ isType (token, TOKEN_CLOSE_PAREN)
+ ))
+ {
+ readToken (token);
+ }
+
+ if (isType (token, TOKEN_COMMA))
+ {
+ readToken (prop_name);
+
+ if (isType (component, TOKEN_STRING) &&
+ isType (prop_set_name, TOKEN_STRING) &&
+ isType (prop_name, TOKEN_STRING) )
+ {
+ addToScope(component, prop_set_name->string);
+ addToScope(component, prop_name->string);
+ makeSqlTag (component, SQLTAG_MLPROP);
+ }
+ }
+ if( !isType (token, TOKEN_CLOSE_PAREN) )
+ findToken (token, TOKEN_CLOSE_PAREN);
+ }
+ }
+
+ findCmdTerm (token, TRUE);
+
+ deleteToken (component);
+ deleteToken (prop_set_name);
+ deleteToken (prop_name);
+}
+
static void parseComment (tokenInfo *const token)
{
/*
@@ -2039,7 +2254,7 @@ static void parseKeywords (tokenInfo *const token)
case KEYWORD_drop: parseDrop (token); break;
case KEYWORD_event: parseEvent (token); break;
case KEYWORD_function: parseSubProgram (token); break;
- case KEYWORD_if: parseStatements (token); break;
+ case KEYWORD_if: parseStatements (token, FALSE); break;
case KEYWORD_index: parseIndex (token); break;
case KEYWORD_ml_table: parseMLTable (token); break;
case KEYWORD_ml_table_lang: parseMLTable (token); break;
@@ -2051,6 +2266,7 @@ static void parseKeywords (tokenInfo *const token)
case KEYWORD_ml_conn_dnet: parseMLConn (token); break;
case KEYWORD_ml_conn_java: parseMLConn (token); break;
case KEYWORD_ml_conn_chk: parseMLConn (token); break;
+ case KEYWORD_ml_prop: parseMLProp (token); break;
case KEYWORD_package: parsePackage (token); break;
case KEYWORD_procedure: parseSubProgram (token); break;
case KEYWORD_publication: parsePublication (token); break;
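
Taken together, the sql.c changes grow the statement walker considerably:
parseStatements() gains an exit_on_endif flag so IF ... ELSEIF ... ELSE ...
END IF (or ENDIF) chains can be walked branch by branch; FOR ... DO and
labelled loops are handled, with a new TOKEN_COLON so a label such as myloop:
is recognised; TOKEN_EQUAL lets parseSubProgram() get past '=' in definitions;
and the MobiLink ml_add_property() call is tagged by the new parseMLProp()
under the new 'mlprop' kind. A hypothetical input fragment exercising the new
paths:

    CREATE PROCEDURE demo()
    BEGIN
        IF x > 0 THEN
            SET y = 1;
        ELSEIF x < 0 THEN
            SET y = -1;
        ELSE
            SET y = 0;
        END IF;

        myloop:
        LOOP
            LEAVE myloop;
        END LOOP;
    END;

    CALL ml_add_property( 'component', 'prop_set', 'prop_name', 'prop_value' );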
diff --git a/plugins/symbol-db/anjuta-tags/tex.c b/plugins/symbol-db/anjuta-tags/tex.c
index a285797..0c6714e 100644
--- a/plugins/symbol-db/anjuta-tags/tex.c
+++ b/plugins/symbol-db/anjuta-tags/tex.c
@@ -53,7 +53,8 @@ typedef enum eKeywordId {
KEYWORD_subsubsection,
KEYWORD_part,
KEYWORD_paragraph,
- KEYWORD_subparagraph
+ KEYWORD_subparagraph,
+ KEYWORD_include
} keywordId;
/* Used to determine whether keyword is valid for the token language and
@@ -68,27 +69,15 @@ typedef enum eTokenType {
TOKEN_UNDEFINED,
TOKEN_CHARACTER,
TOKEN_CLOSE_PAREN,
- TOKEN_SEMICOLON,
- TOKEN_COLON,
TOKEN_COMMA,
TOKEN_KEYWORD,
TOKEN_OPEN_PAREN,
- TOKEN_OPERATOR,
TOKEN_IDENTIFIER,
TOKEN_STRING,
- TOKEN_PERIOD,
TOKEN_OPEN_CURLY,
TOKEN_CLOSE_CURLY,
- TOKEN_EQUAL_SIGN,
- TOKEN_EXCLAMATION,
- TOKEN_FORWARD_SLASH,
TOKEN_OPEN_SQUARE,
TOKEN_CLOSE_SQUARE,
- TOKEN_OPEN_MXML,
- TOKEN_CLOSE_MXML,
- TOKEN_CLOSE_SGML,
- TOKEN_LESS_THAN,
- TOKEN_GREATER_THAN,
TOKEN_QUESTION_MARK,
TOKEN_STAR
} tokenType;
@@ -118,6 +107,7 @@ typedef enum {
TEXTAG_PART,
TEXTAG_PARAGRAPH,
TEXTAG_SUBPARAGRAPH,
+ TEXTAG_INCLUDE,
TEXTAG_COUNT
} texKind;
@@ -128,7 +118,8 @@ static kindOption TexKinds [] = {
{ TRUE, 'b', "subsubsection", "subsubsections" },
{ TRUE, 'p', "part", "parts" },
{ TRUE, 'P', "paragraph", "paragraphs" },
- { TRUE, 'G', "subparagraph", "subparagraphs" }
+ { TRUE, 'G', "subparagraph", "subparagraphs" },
+ { TRUE, 'i', "include", "includes" }
};
static const keywordDesc TexKeywordTable [] = {
@@ -139,7 +130,8 @@ static const keywordDesc TexKeywordTable [] = {
{ "subsubsection", KEYWORD_subsubsection },
{ "part", KEYWORD_part },
{ "paragraph", KEYWORD_paragraph },
- { "subparagraph", KEYWORD_subparagraph }
+ { "subparagraph", KEYWORD_subparagraph },
+ { "include", KEYWORD_include }
};
/*
@@ -150,7 +142,7 @@ static boolean isIdentChar (const int c)
{
return (boolean)
(isalpha (c) || isdigit (c) || c == '$' ||
- c == '_' || c == '#');
+ c == '_' || c == '#' || c == '-' || c == '.');
}
static void buildTexKeywordHash (void)
@@ -297,16 +289,11 @@ getNextChar:
case EOF: longjmp (Exception, (int)ExceptionEOF); break;
case '(': token->type = TOKEN_OPEN_PAREN; break;
case ')': token->type = TOKEN_CLOSE_PAREN; break;
- case ';': token->type = TOKEN_SEMICOLON; break;
case ',': token->type = TOKEN_COMMA; break;
- case '.': token->type = TOKEN_PERIOD; break;
- case ':': token->type = TOKEN_COLON; break;
case '{': token->type = TOKEN_OPEN_CURLY; break;
case '}': token->type = TOKEN_CLOSE_CURLY; break;
- case '=': token->type = TOKEN_EQUAL_SIGN; break;
case '[': token->type = TOKEN_OPEN_SQUARE; break;
case ']': token->type = TOKEN_CLOSE_SQUARE; break;
- case '?': token->type = TOKEN_QUESTION_MARK; break;
case '*': token->type = TOKEN_STAR; break;
case '\'':
@@ -427,7 +414,8 @@ static boolean parseTag (tokenInfo *const token, texKind kind)
readToken (token);
while (! isType (token, TOKEN_CLOSE_CURLY) )
{
- if (isType (token, TOKEN_IDENTIFIER) && useLongName)
+ /* if (isType (token, TOKEN_IDENTIFIER) && useLongName) */
+ if (useLongName)
{
if (fullname->length > 0)
vStringCatS (fullname, " ");
@@ -479,6 +467,9 @@ static void parseTexFile (tokenInfo *const token)
case KEYWORD_subparagraph:
parseTag (token, TEXTAG_SUBPARAGRAPH);
break;
+ case KEYWORD_include:
+ parseTag (token, TEXTAG_INCLUDE);
+ break;
default:
break;
}
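
The tex.c changes add an 'include' kind so \include{...} arguments are tagged,
extend isIdentChar() with '-' and '.' so file-name-like arguments tokenize as
one identifier, drop the token types the TeX lexer never produced, and relax
parseTag() to build the long name from every token between the braces. A
hypothetical input line now tagged:

    \include{chapter-1.intro}   % emits an include tag for chapter-1.intro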