Showing 204 of 204 total issues
Function `__init__` has 6 arguments (exceeds 4 allowed). Consider refactoring.
def __init__(
Function `wl_test_pos_tag_models` has 6 arguments (exceeds 4 allowed). Consider refactoring.
def wl_test_pos_tag_models(lang, pos_tagger, test_sentence, tokens, results, results_universal):
Avoid deeply nested control flow statements.
for word in tr.split():
add_val_to_trs(trs_lexicon, word, vals)
else:
Avoid deeply nested control flow statements.
if i == 0 and j == 0 and k == 0:
tokens = []
for l, token in enumerate(sentence_seg):
# Do not remove the first token and set it to an empty token instead if it is a punctuation mark
Function `wl_pos_tag` has 6 arguments (exceeds 4 allowed). Consider refactoring.
def wl_pos_tag(main, inputs, lang, pos_tagger = 'default', tagset = 'default', force = False):
Avoid deeply nested control flow statements.
for doc in nlp.pipe(lines):
for token in doc:
texts_tagged.append(token.text)
if tagset in ['default', 'raw']:
Avoid deeply nested control flow statements.
if lens.any():
self.set_item_num(row, i, numpy.mean(lens))
self.set_item_num(row + 1, i, numpy.std(lens))
self.set_item_num(row + 2, i, numpy.var(lens))
self.set_item_num(row + 3, i, numpy.min(lens))
Avoid deeply nested control flow statements.
for col in cols:
row_to_exp.append(self.table.model().item(row, col).text())
Function `wl_test_lemmatize_models` has 6 arguments (exceeds 4 allowed). Consider refactoring.
def wl_test_lemmatize_models(lang, lemmatizer, test_sentence, tokens, results, lang_exceptions = None):
Avoid deeply nested control flow statements.
for util, trs in TRS_NLP_UTILS.items():
# Only replace language util names after language names or at the end of text
if f' - {util}' in tr or tr.endswith(util):
if f' - {util}' in tr:
tr = tr.replace(f' - {util}', f' - {trs[0]}', 1)
Avoid deeply nested control flow statements.
if settings['token_settings']['punc_marks']:
node_tokens_search = list(ngram)
# Remove empty tokens for searching in results
left_tokens_search = [token for token in copy.deepcopy(left_tokens_raw) if token]
Avoid deeply nested control flow statements.
if not self.isRowHidden(row):
item = self.model().item(row, col)
val_cum += item.val
item.setText(str(val_cum))
Avoid deeply nested control flow statements.
if not self.isRowHidden(row):
item = self.model().item(row, col)
item.setText(f'{item.val:.{precision_decimals}}')
# Percentages
Avoid deeply nested control flow statements.
if prefer_raw:
# Always use original tokens
results_modified.extend(tokens_raw_temp)
# eg. POS tagging
else:
Avoid deeply nested control flow statements.
for token, lemma_search in set(zip(tokens, lemmas_search)):
if re_match(lemma_matched, lemma_search, flags = re_flags):
tokens_matched[search_term_token].add(token)
Avoid deeply nested control flow statements.
for sentence in doc.sentences:
for token in sentence.words:
texts_tagged.append(token.text)
if tagset in ['default', 'raw']:
Avoid deeply nested control flow statements.
for k, ngram in enumerate(wl_nlp_utils.ngrams(tokens, len_search_term)):
if ngram == search_term:
points.append([x_start + k / text.num_tokens * len_tokens_total, y_start - j])
# Total
points.append([x_start_total + k, 0])
Function `wl_test_sentiment_analyze_models` has 6 arguments (exceeds 4 allowed). Consider refactoring.
def wl_test_sentiment_analyze_models(lang, sentiment_analyzer, test_sentence, tokens, results, check_results = True):
Avoid deeply nested control flow statements.
if settings_limit_searching == _tr('wl_colligation_extractor', 'Within sentence segments'):
offsets_unit = offsets_sentence_segs
len_unit = len_sentence_segs
elif settings_limit_searching == _tr('wl_colligation_extractor', 'Within sentences'):
offsets_unit = offsets_sentences
Avoid deeply nested control flow statements.
if settings_limit_searching == _tr('wl_colligation_extractor', 'None'):
tags_left = text.tags[max(0, i + window_left) : i]
tags_right = text.tags[i + ngram_size : i + ngram_size + window_right]
else:
# Span positions (Left)