Showing 206 of 206 total issues
Avoid deeply nested control flow statements.
for i, token in enumerate(sentence_seg):
    if wl_checks_tokens.is_num(token):
        sentence_seg[i] = wl_texts.Wl_Token('')

# Replace token texts with lemmas
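One way to flatten this is to move the token check into a small helper so the enclosing loops lose a level — a minimal sketch that reuses the project's wl_checks_tokens and wl_texts modules (the helper name blank_num_tokens is illustrative):

def blank_num_tokens(sentence_seg):
    # Blank out numeral tokens in place, preserving list length
    for i, token in enumerate(sentence_seg):
        if wl_checks_tokens.is_num(token):
            sentence_seg[i] = wl_texts.Wl_Token('')

Each call site inside the outer sentence loops then collapses to a single blank_num_tokens(sentence_seg) call.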
Avoid deeply nested control flow statements.
for sentence in doc.sentences:
    for token in sentence.words:
        texts_tagged.append(token.text)

        if tagset in ['default', 'raw']:
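The sentence/word traversal can be flattened with a generator so the tagging logic sits one level up — a sketch assuming a stanza-style doc where each sentence exposes a words list, as the excerpt suggests (iter_words is an illustrative name):

def iter_words(doc):
    # Yield every word of every sentence in document order
    for sentence in doc.sentences:
        yield from sentence.words

texts_tagged = [word.text for word in iter_words(doc)]

The tagset branch then operates on the flat stream instead of inside two loops.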
Avoid deeply nested control flow statements.
for k, ngram in enumerate(wl_nlp_utils.ngrams(text.get_tokens_flat(), len_search_term)):
    if ngram == search_term:
        points.append([x_start + k, i])
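For reference, wl_nlp_utils.ngrams can be approximated with a stdlib sliding window over a sequence, which also shows why k is the offset of the n-gram's first token (this is an assumption about its behavior, not the project's actual implementation):

from itertools import islice

def ngrams(tokens, n):
    # Sliding-window n-grams over a sequence (requires a list, not a one-shot iterator)
    return zip(*(islice(tokens, i, None) for i in range(n)))

Collecting the match offsets in a comprehension over this window, then appending the points in the caller, removes one level of nesting at the call site.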
Avoid deeply nested control flow statements.
if any((text in tr for text in [])):
    # Flag translation as unfinished to be reviewed manually
    unfinished = True
Avoid deeply nested control flow statements.
for word in wl_word_tokenization.wl_word_tokenize_flat(main, tr, lang):
    add_val_to_trs(trs_lexicon, word, vals)
else:
Avoid deeply nested control flow statements.
if wl_matching.split_tag_embedded(opening_tag_text)[1] == '*':
    opening_tag_text = opening_tag_text.replace('*', self.tr('TAG'))
Avoid deeply nested control flow statements.
for tag in re.finditer(re_tags, para):
    tags_tokens = self.add_tags_splitting(para[i_tag_end:tag.start()], tags_tokens)
    tags_tokens[-1].append(tag.group())

    i_tag_end = tag.end()
Avoid deeply nested control flow statements.
if (para := para[i_tag_end:]):
    tags_tokens = self.add_tags_splitting(para, tags_tokens)

# Insert tags at the start of the text
if self.tags_text_start and tags_tokens:
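Since the per-paragraph scan above is self-contained, pulling it into its own method removes the nesting from the caller — a sketch assuming re_tags and add_tags_splitting behave as excerpted (split_tags_para is an illustrative name):

def split_tags_para(self, para, tags_tokens):
    # Split one paragraph into text runs and tags
    i_tag_end = 0

    for tag in re.finditer(re_tags, para):
        tags_tokens = self.add_tags_splitting(para[i_tag_end:tag.start()], tags_tokens)
        tags_tokens[-1].append(tag.group())
        i_tag_end = tag.end()

    # Trailing text after the last tag
    if (para := para[i_tag_end:]):
        tags_tokens = self.add_tags_splitting(para, tags_tokens)

    return tags_tokens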
Avoid deeply nested control flow statements.
for token, lemma_search in set(zip(tokens, lemmas_search)):
    if re_match(lemma_matched, lemma_search, flags = re_flags):
        search_results.add(token)
Avoid deeply nested control flow statements.
for token, lemma_search in set(zip(tokens, lemmas_search)):
    if re_match(lemma_matched, lemma_search, flags = re_flags):
        tokens_matched[search_term_token].add(token)
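This loop and the one above it differ only in the destination set, so a shared helper removes both the duplication and a nesting level — a sketch assuming re_match and re_flags are the module-level aliases used in the excerpts (match_lemmas is an illustrative name):

def match_lemmas(tokens, lemmas_search, lemma_matched, re_flags):
    # Return every token whose search lemma matches the pattern
    return {
        token
        for token, lemma_search in set(zip(tokens, lemmas_search))
        if re_match(lemma_matched, lemma_search, flags = re_flags)
    }

The call sites become search_results |= match_lemmas(...) and tokens_matched[search_term_token] |= match_lemmas(...).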
Avoid deeply nested control flow statements.
for sentence_seg in sentence:
    for i, token in enumerate(sentence_seg):
        if token.istitle():
            sentence_seg[i] = wl_texts.Wl_Token('')
else:
Avoid deeply nested control flow statements.
if i != item_row and item_text == text:
    wl_msg_boxes.Wl_Msg_Box_Warning(
        self.main,
        title = _tr('wl_lists', 'Duplicates Found'),
        text = _tr('wl_lists', '''
Function add_headers_vert has 6 arguments (exceeds 4 allowed). Consider refactoring.
def add_headers_vert(
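A standard remedy is to group related parameters into a single object so the signature stays under the limit — a sketch with purely illustrative field names, since the excerpt does not show the function's actual parameters:

from dataclasses import dataclass, field

@dataclass
class Headers_Vert_Settings:
    # Illustrative grouping; substitute the function's actual parameters
    headers: list = field(default_factory=list)
    headers_int: set = field(default_factory=set)
    headers_float: set = field(default_factory=set)
    headers_pct: set = field(default_factory=set)

def add_headers_vert(self, settings: Headers_Vert_Settings):
    ...

Call sites then construct one settings object instead of passing six positional arguments.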
Avoid deeply nested control flow statements.
for file in glob.glob(os.path.join(
    self.settings_custom['general']['imp']['temp_files']['default_path'], '*.*'
)):
    os.remove(file)
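The cleanup itself can live in a small helper, which both removes the nesting and makes the intent explicit — a self-contained sketch using pathlib (clear_temp_files is an illustrative name):

from pathlib import Path

def clear_temp_files(temp_dir):
    # Delete every regular file directly under temp_dir
    for file in Path(temp_dir).glob('*.*'):
        if file.is_file():
            file.unlink()

The call site reduces to clear_temp_files(self.settings_custom['general']['imp']['temp_files']['default_path']).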
Function __init__ has 6 arguments (exceeds 4 allowed). Consider refactoring.
def __init__(
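When the extra parameters are options rather than required inputs, keyword-only arguments keep a long signature readable without changing behavior — an illustrative sketch, since the real parameters of this __init__ are not shown in the excerpt:

class Wl_Dialog_Example:
    def __init__(self, main, *, title = '', width = 0, height = 0, resizable = False):
        # Callers must name each option, which prevents positional mix-ups
        self.main = main
        self.title = title
        self.width = width
        self.height = height
        self.resizable = resizable

This does not reduce the argument count; when the count itself must drop, grouping the options into a settings object, as sketched for add_headers_vert above, is the usual alternative.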
Avoid deeply nested control flow statements.
if (
    (
        (
            not settings['search_settings']['match_dependency_relations']
            and (token in search_terms or head in search_terms)
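Naming each clause before the test flattens the boolean nesting without changing the logic — a sketch over the variables visible in the excerpt (the condition is truncated, so only the first clause is reconstructed, and the variable name is illustrative):

matches_token_or_head = (
    not settings['search_settings']['match_dependency_relations']
    and (token in search_terms or head in search_terms)
)

if matches_token_or_head:
    ...

Each remaining parenthesized clause gets its own name the same way, so the final if reads as a short disjunction of named conditions.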
Avoid deeply nested control flow statements.
for j, collocate in enumerate(tokens_right):
    if wl_matching.check_context(
        i, tokens,
        context_settings = settings['search_settings']['context_settings'],
        search_terms_incl = search_terms_incl,
Avoid deeply nested control flow statements.
for j, collocate in enumerate(reversed(tokens_left)):
    if wl_matching.check_context(
        i, tokens,
        context_settings = settings['search_settings']['context_settings'],
        search_terms_incl = search_terms_incl,
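As excerpted, wl_matching.check_context depends only on i, tokens, and the settings, not on j or collocate, so the check can be hoisted above both loops, assuming the elided trailing arguments are equally loop-invariant:

if wl_matching.check_context(
    i, tokens,
    context_settings = settings['search_settings']['context_settings'],
    search_terms_incl = search_terms_incl
):
    for j, collocate in enumerate(reversed(tokens_left)):
        ...  # process left collocates

    for j, collocate in enumerate(tokens_right):
        ...  # process right collocates

This removes a nesting level from both loops and evaluates the context check once instead of once per collocate.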
Avoid deeply nested control flow statements.
if settings_limit_searching == _tr('Wl_Worker_Collocation_Extractor', 'None'):
    tokens_right = tokens[i + ngram_size + window_left - 1 : i + ngram_size + window_right]
else:
    # Span positions (Right)
    for position in range(i + ngram_size + window_left - 1, i + ngram_size + window_right):
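The slice arithmetic is easier to audit when the span is computed by a named helper: position 1 is the first token to the right of the n-gram, so a window of (window_left, window_right) maps to the half-open slice below (window_right_span is an illustrative name):

def window_right_span(i, ngram_size, window_left, window_right):
    # The n-gram occupies tokens[i : i + ngram_size]; the right-hand window
    # covers positions window_left..window_right counted from its last token
    return i + ngram_size + window_left - 1, i + ngram_size + window_right

For example, with i = 10, ngram_size = 2, and a window of (1, 5), the span is tokens[12:17], the five tokens immediately following the bigram.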
Avoid deeply nested control flow statements.
with open(file_settings_display_lang, 'wb') as f:
    pickle.dump(action.lang, f)

# Remove settings file
if os.path.exists(file_settings):
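Extracting the whole persist-and-reset sequence into a helper keeps the menu handler flat — a sketch assuming the truncated branch removes file_settings, as its comment indicates (save_display_lang is an illustrative name):

import os
import pickle

def save_display_lang(file_settings_display_lang, lang, file_settings):
    # Persist the chosen display language
    with open(file_settings_display_lang, 'wb') as f:
        pickle.dump(lang, f)

    # Remove the settings file so it is regenerated on restart
    if os.path.exists(file_settings):
        os.remove(file_settings)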