Showing 204 of 204 total issues
Avoid deeply nested control flow statements. Open
Open
if line and line not in items_cur:
items_to_imp.append(line)
Avoid deeply nested control flow statements. Open
Open
for i, token in enumerate(sentence_seg):
if wl_checks_tokens.is_num(token):
sentence_seg[i] = ''
# Filter stop words
Avoid deeply nested control flow statements. Open
Open
if wl_matching.split_tag_embedded(opening_tag_text)[1] == '*':
opening_tag_text = opening_tag_text.replace('*', self.tr('TAG'))
Avoid deeply nested control flow statements. Open
Open
for j, collocate in enumerate(reversed(tags_left)):
if wl_matching.check_context(
i, tokens,
context_settings = settings['search_settings']['context_settings'],
search_terms_incl = search_terms_incl,
Avoid deeply nested control flow statements. Open
Open
if ngram == search_term:
colligations_freqs_file_filtered[(node, collocate)] = freqs
Avoid deeply nested control flow statements. Open
Open
for k, ngram in enumerate(wl_nlp_utils.ngrams(text.get_tokens_flat(), len_search_term)):
if ngram == search_term:
points.append([x_start + k, i])
Avoid deeply nested control flow statements. Open
Open
for collocate in range(10):
collocate = wl_texts.Wl_Token(str(collocate))
freq_1, freq_2 = random.sample(range(10000), 2)
freq_files_items[(node, collocate)] = [
Avoid deeply nested control flow statements. Open
Open
if self.settings_tags == 'body_tag_settings' and tag_name == '*':
opening_tag_text = opening_tag_text.replace('*', _tr('wl_settings_files', 'TAG'))
closing_tag_text = self.model().item(row, 3).text().replace('*', _tr('wl_settings_files', 'TAG'))
preview.setText(opening_tag_text + _tr('wl_settings_files', 'token') + closing_tag_text)
else:
Avoid deeply nested control flow statements. Open
Open
if settings_limit_searching == _tr('wl_colligation_extractor', 'None'):
tags_right = text.tags[i + ngram_size + window_left - 1 : i + ngram_size + window_right]
else:
# Span positions (Right)
for position in range(i + ngram_size + window_left - 1, i + ngram_size + window_right):
Avoid deeply nested control flow statements. Open
Open
if (para := para[tag_last_end:]):
tags_tokens = self.add_tags_splitting(para, tags_tokens)
# Add empty tags for untagged files
if not self.tagged:
Avoid deeply nested control flow statements. Open
Open
for doc in nlp.pipe(docs):
for token in doc:
texts_tagged.append(token.text)
if tagset in ['default', 'raw']:
Avoid deeply nested control flow statements. Open
Open
for item in range(100):
item = wl_texts.Wl_Token(str(item))
freq_1, freq_2 = random.sample(range(100), 2)
freq_files_items[item] = [
Avoid deeply nested control flow statements. Open
Open
if token_properties:
i_tag_start += len(doc)
Avoid deeply nested control flow statements. Open
Open
if not file_name:
wl_msg_boxes.Wl_Msg_Box_Warning(
self.main,
title = self.tr('Empty File Name'),
text = self.tr('''
Avoid deeply nested control flow statements. Open
Open
if token.lemma_:
lemmas.append(token.lemma_)
else:
lemmas.append(token.text)
# Stanza
Function `add_header_vert` has 6 arguments (exceeds 4 allowed). Consider refactoring. Open
Open
def add_header_vert(
Avoid deeply nested control flow statements. Open
Open
for sentence in wl_sentence_tokenization.wl_sentence_split(self.main, text_no_tags):
self.tokens_multilevel[-1].append([])
for sentence_seg in wl_sentence_tokenization.wl_sentence_seg_tokenize_tokens(
self.main,
Function `wl_spin_boxes_min_max` has 6 arguments (exceeds 4 allowed). Consider refactoring. Open
Open
def wl_spin_boxes_min_max(
Avoid deeply nested control flow statements. Open
Open
for item in range(100):
item = wl_texts.Wl_Token(str(item))
freq_1, freq_2 = random.sample(range(100), 2)
freq_files_items[item] = [
Avoid deeply nested control flow statements. Open
Open
if tuple(tokens[i + j : i + j + len(search_term)]) == tuple(search_term):
excl_matched = False
break
# Search terms to be excluded not found in texts