Showing 206 of 206 total issues
Avoid deeply nested control flow statements. Open
Open
if ngram[i : i + len_search_term] == search_term:
ngrams_is_filtered.append((ngram, ngram_i))
# Check context settings
ngrams_is = (
Function __init__
has 6 arguments (exceeds 4 allowed). Consider refactoring. Open
Open
def __init__(self, main, title, width = 0, height = 0, resizable = True, no_buttons = False):
Avoid deeply nested control flow statements. Open
Open
for token in sentence_seg:
refs = head_refs[i_token]
if refs is not None:
token.head = self.tokens_multilevel[refs[0]][refs[1]][refs[2]][refs[3]]
Avoid deeply nested control flow statements. Open
Open
for tokens in wl_nlp_utils.split_token_list(main, texts, pos_tagger):
# The Japanese model does not have a tagger component and Japanese POS tags are taken directly from SudachiPy
# See: https://github.com/explosion/spaCy/discussions/9983#discussioncomment-1910117
if lang == 'jpn':
docs.append(''.join(tokens))
Avoid deeply nested control flow statements. Open
Open
if not self.isRowHidden(row):
item = self.model().item(row, col)
val_cum += item.val
item.setText(str(val_cum))
Avoid deeply nested control flow statements. Open
Open
if not self.isRowHidden(row):
item = self.model().item(row, col)
val_cum += item.val
item.setText(f'{val_cum:.{precision_pcts}%}')
Function wl_test_sentiment_analyze_models
has 6 arguments (exceeds 4 allowed). Consider refactoring. Open
Open
def wl_test_sentiment_analyze_models(lang, sentiment_analyzer, test_sentence, tokens, results, check_results = True):
Avoid deeply nested control flow statements. Open
Open
for word in tr.split():
add_val_to_trs(trs_lexicon, word, vals)
else:
Avoid deeply nested control flow statements. Open
Open
if ngram == search_term:
colligations_freqs_file_filtered[(node, collocate)] = freqs
Avoid deeply nested control flow statements. Open
Open
if token.lemma is not None:
lemmas.append(token.lemma)
else:
lemmas.append(token.text)
else:
Avoid deeply nested control flow statements. Open
Open
for token in sentence_seg:
if token.tag is not None:
token.tag = token.tag.lower()
if token.lemma is not None:
Function wl_pos_tag
has 6 arguments (exceeds 4 allowed). Consider refactoring. Open
Open
def wl_pos_tag(main, inputs, lang, pos_tagger = 'default', tagset = 'default', force = False):
Avoid deeply nested control flow statements. Open
Open
for file_type, trs in TRS_FILE_TYPES.items():
if tr == file_type:
tr = trs[0]
break
Avoid deeply nested control flow statements. Open
Open
for sentence in doc.sentences:
for token in sentence.words:
texts_tagged.append(token.text)
if tagset in ['default', 'raw']:
Avoid deeply nested control flow statements. Open
Open
if lens.any():
self.set_item_num(row, i, numpy.mean(lens))
self.set_item_num(row + 1, i, numpy.std(lens))
self.set_item_num(row + 2, i, numpy.var(lens))
self.set_item_num(row + 3, i, numpy.min(lens))
Avoid deeply nested control flow statements. Open
Open
if not self.isRowHidden(row):
item = self.model().item(row, col)
item.setText(f'{item.val:.{precision_pcts}%}')
elif self.header_orientation == 'vert':
Avoid deeply nested control flow statements. Open
Open
for lang, trs in TRS_LANGS.items():
# Language names
if tr == lang:
tr = trs[0]
elif f'{lang} (' in tr:
Avoid deeply nested control flow statements. Open
Open
for encoding, trs in TRS_ENCODINGS.items():
if encoding in tr:
tr = tr.replace(encoding, trs[0])
break
Avoid deeply nested control flow statements. Open
Open
if re_match(search_term, token_search.display_text(), flags = re_flags):
search_results.add(token)
# Match inflected forms of search terms and search results
if settings['match_inflected_forms']:
Avoid deeply nested control flow statements. Open
Open
for sentence_seg in sentence:
for i, token in enumerate(sentence_seg):
if token.islower():
sentence_seg[i] = wl_texts.Wl_Token('')
# Uppercase