-
-
Notifications
You must be signed in to change notification settings - Fork 0
Add push_data parameter to DataStack::reduce_action
#41
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Conversation
Summary of Changes
Hello @ehwan, I'm Gemini Code Assist! I'm currently reviewing this pull request and will post my feedback shortly. In the meantime, here's a summary to help you and other reviewers quickly get up to speed! This pull request introduces a `push_data` parameter to `DataStack::reduce_action`, moving the decision of whether to push the reduced value into the data-stack implementation itself.
Highlights
Using Gemini Code Assist
The full guide for Gemini Code Assist can be found on our documentation page; here are some quick tips.
Invoking Gemini
You can request assistance from Gemini at any point by creating a comment using either of the supported invocation methods.
Customization
To customize Gemini Code Assist for the GitHub experience, repository maintainers can create a configuration file and/or provide a custom code review style guide (such as PEP-8 for Python) by creating and adding files to a configuration directory in the repository.
Limitations & Feedback
Gemini Code Assist may make mistakes. Please leave feedback on any instances where its feedback is incorrect or counterproductive. You can react with 👍 and 👎 on @gemini-code-assist comments. If you're interested in giving your feedback about your experience with Gemini Code Assist for GitHub and other Google products, sign up here. You can also get AI-powered code generation, chat, as well as code reviews directly in the IDE at no cost with the Gemini Code Assist IDE Extension.
Footnotes
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Code Review
This pull request introduces a push_data parameter to the DataStack::reduce_action method, which is a nice refactoring. It makes the intent clearer and moves the logic of whether to push data into the DataStack implementation, simplifying the parser context code in both deterministic and non-deterministic parsers. The changes are well-propagated through the codebase, including the code generator and example files. I have one suggestion to improve the maintainability of the code generator.
| if rule.tokens.len() > 0 { | ||
| // if first token's tag is equal to new_tag, no need to (pop n tokens -> push new token). | ||
| // just pop n-1 tokens | ||
| let first_tag_name = token_to_stack_name(rule.tokens[0].token) | ||
| .unwrap_or(&empty_tag_name); | ||
|
|
||
| if first_tag_name == new_tag_name { | ||
| // pop n-1 tokens, no new insertion | ||
| let len = rule.tokens.len() - 1; | ||
| let truncate_stream = if len > 0 { | ||
| quote! {__data_stack.#tag_stack_name.truncate(__data_stack.#tag_stack_name.len() - #len);} | ||
| if new_tag_name == &empty_tag_name { | ||
| // if first token's tag is equal to new_tag, no need to (pop n tokens -> push new token). | ||
| // just pop n-1 tokens | ||
| let first_tag_name = token_to_stack_name(rule.tokens[0].token) | ||
| .unwrap_or(&empty_tag_name); | ||
|
|
||
| if first_tag_name == new_tag_name { | ||
| // pop n-1 tokens, no new insertion | ||
| let len = rule.tokens.len() - 1; | ||
| let truncate_stream = if len > 0 { | ||
| quote! {__data_stack.#tag_stack_name.truncate(__data_stack.#tag_stack_name.len() - #len);} | ||
| } else { | ||
| TokenStream::new() | ||
| }; | ||
| truncate_stream | ||
| } else { | ||
| TokenStream::new() | ||
| }; | ||
| truncate_stream | ||
| let len = rule.tokens.len(); | ||
| // len > 0 here | ||
| quote! { | ||
| __data_stack.#tag_stack_name.truncate(__data_stack.#tag_stack_name.len() - #len); | ||
| __data_stack.#tag_stack_name.push(#tag_enum_name::#new_tag_name); | ||
| } | ||
| } | ||
| } else { | ||
| let len = rule.tokens.len(); | ||
| // len > 0 here | ||
| quote! { | ||
| __data_stack.#tag_stack_name.truncate(__data_stack.#tag_stack_name.len() - #len); | ||
| __data_stack.#tag_stack_name.push(#tag_enum_name::#new_tag_name); | ||
| // if first token's tag is equal to new_tag, no need to (pop n tokens -> push new token). | ||
| // just pop n-1 tokens | ||
| let first_tag_name = token_to_stack_name(rule.tokens[0].token) | ||
| .unwrap_or(&empty_tag_name); | ||
|
|
||
| if first_tag_name == new_tag_name { | ||
| // pop n-1 tokens, no new insertion | ||
| let lenm1 = rule.tokens.len() - 1; | ||
| let truncate_stream = if lenm1 > 0 { | ||
| quote! {__data_stack.#tag_stack_name.truncate(__data_stack.#tag_stack_name.len() - #lenm1);} | ||
| } else { | ||
| TokenStream::new() | ||
| }; | ||
|
|
||
| let len = rule.tokens.len(); | ||
| quote! { | ||
| if __push_data { | ||
| #truncate_stream | ||
| } else { | ||
| __data_stack.#tag_stack_name.truncate(__data_stack.#tag_stack_name.len() - #len); | ||
| __data_stack.#tag_stack_name.push(#tag_enum_name::#empty_tag_name); | ||
| } | ||
| } | ||
| } else if first_tag_name == &empty_tag_name { | ||
| // pop n-1 tokens, no new insertion | ||
| let lenm1 = rule.tokens.len() - 1; | ||
| let truncate_stream = if lenm1 > 0 { | ||
| quote! {__data_stack.#tag_stack_name.truncate(__data_stack.#tag_stack_name.len() - #lenm1);} | ||
| } else { | ||
| TokenStream::new() | ||
| }; | ||
|
|
||
| let len = rule.tokens.len(); | ||
| quote! { | ||
| if __push_data { | ||
| __data_stack.#tag_stack_name.truncate(__data_stack.#tag_stack_name.len() - #len); | ||
| __data_stack.#tag_stack_name.push(#tag_enum_name::#new_tag_name); | ||
| } else { | ||
| #truncate_stream | ||
| } | ||
| } | ||
| } else { | ||
| let len = rule.tokens.len(); | ||
| // len > 0 here | ||
| quote! { | ||
| __data_stack.#tag_stack_name.truncate(__data_stack.#tag_stack_name.len() - #len); | ||
| if __push_data { | ||
| __data_stack.#tag_stack_name.push(#tag_enum_name::#new_tag_name); | ||
| } else { | ||
| __data_stack.#tag_stack_name.push(#tag_enum_name::#empty_tag_name); | ||
| } | ||
| } | ||
| } | ||
| } | ||
| } else { | ||
| quote! { | ||
| __data_stack.#tag_stack_name.push(#tag_enum_name::#new_tag_name); | ||
| if new_tag_name == &empty_tag_name { | ||
| quote! { | ||
| __data_stack.#tag_stack_name.push(#tag_enum_name::#new_tag_name); | ||
| } | ||
| } else { | ||
| quote! { | ||
| if __push_data { | ||
| __data_stack.#tag_stack_name.push(#tag_enum_name::#new_tag_name); | ||
| } else { | ||
| __data_stack.#tag_stack_name.push(#tag_enum_name::#empty_tag_name); | ||
| } | ||
| } | ||
| } | ||
| } |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
The logic for generating tag stack modifications is quite complex and contains a lot of duplicated code. This makes it hard to read and maintain. For example, let first_tag_name = ... and the logic to create truncate_stream are repeated.
Consider refactoring this block to reduce duplication. You could extract common variables and structures to the top of the if rule.tokens.len() > 0 block. This would make the logic for each case clearer and the whole block more maintainable.
This refactoring would also help avoid generating code like if __push_data {} else { ... } which is functionally correct but stylistically awkward.
if rule.tokens.len() > 0 {
let first_tag_name = token_to_stack_name(rule.tokens[0].token)
.unwrap_or(&empty_tag_name);
let len = rule.tokens.len();
let lenm1 = len - 1;
let truncate_lenm1 = if lenm1 > 0 {
quote! {__data_stack.#tag_stack_name.truncate(__data_stack.#tag_stack_name.len() - #lenm1);}
} else {
TokenStream::new()
};
let truncate_len = quote! {__data_stack.#tag_stack_name.truncate(__data_stack.#tag_stack_name.len() - #len);};
if new_tag_name == &empty_tag_name {
if first_tag_name == new_tag_name {
// pop n-1 tokens, no new insertion
truncate_lenm1
} else {
// pop n tokens, push empty
quote! {
#truncate_len
__data_stack.#tag_stack_name.push(#tag_enum_name::#new_tag_name);
}
}
} else { // new_tag_name is not empty
if first_tag_name == new_tag_name {
// pop n-1 tokens if __push_data, else pop n and push empty
quote! {
if __push_data {
#truncate_lenm1
} else {
#truncate_len
__data_stack.#tag_stack_name.push(#tag_enum_name::#empty_tag_name);
}
}
} else if first_tag_name == &empty_tag_name {
// pop n and push new if __push_data, else pop n-1
quote! {
if __push_data {
#truncate_len
__data_stack.#tag_stack_name.push(#tag_enum_name::#new_tag_name);
} else {
#truncate_lenm1
}
}
} else {
// pop n, and push new or empty based on __push_data
quote! {
#truncate_len
if __push_data {
__data_stack.#tag_stack_name.push(#tag_enum_name::#new_tag_name);
} else {
__data_stack.#tag_stack_name.push(#tag_enum_name::#empty_tag_name);
}
}
}
}
} else {
if new_tag_name == &empty_tag_name {
quote! {
__data_stack.#tag_stack_name.push(#tag_enum_name::#new_tag_name);
}
} else {
quote! {
if __push_data {
__data_stack.#tag_stack_name.push(#tag_enum_name::#new_tag_name);
} else {
__data_stack.#tag_stack_name.push(#tag_enum_name::#empty_tag_name);
}
}
}
}
No description provided.