Showing 206 of 206 total issues
Avoid deeply nested control flow statements.
for doc in nlp.pipe(docs):
    for token in doc:
        texts_tagged.append(token.text)

        if tagset in ['default', 'raw']:
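A common fix for this pattern is to move the inner loop into a named helper so the call site stays one level deep. A minimal runnable sketch (the Token class, the helper name, and the data are stand-ins for the real spaCy pipeline, not Wordless's actual code):

class Token:
    def __init__(self, text):
        self.text = text

def tag_doc(doc, tagset, texts_tagged):
    # Hypothetical helper: handles a single doc, removing one loop level at the call site
    for token in doc:
        texts_tagged.append(token.text)

        if tagset in ('default', 'raw'):
            pass  # tag handling would continue here, now one level shallower

docs = [[Token('Hello'), Token('world')], [Token('Bye')]]
texts_tagged = []

for doc in docs:  # stands in for nlp.pipe(docs)
    tag_doc(doc, 'default', texts_tagged)

print(texts_tagged)  # ['Hello', 'world', 'Bye']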
Avoid deeply nested control flow statements.
if freqs_totals[j][k]:
    self.set_item_num(i, cols_freqs_start[j] + k * 2 + 1, freq / freqs_totals[j][k])
else:
    self.set_item_num(i, cols_freqs_start[j] + k * 2 + 1, 0)
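The two branches differ only in the value written, so a small safe-division helper collapses them into one call and removes a nesting level. A sketch (safe_div is our name, not a Wordless function):

def safe_div(numerator, denominator):
    # Returns 0 when the denominator is zero or falsy, so callers need no if/else
    return numerator / denominator if denominator else 0

print(safe_div(3, 4))  # 0.75
print(safe_div(3, 0))  # 0

# The original pair of calls would then become a single line, e.g.:
# self.set_item_num(i, cols_freqs_start[j] + k * 2 + 1, safe_div(freq, freqs_totals[j][k]))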
Avoid deeply nested control flow statements.
for j, collocate in enumerate(reversed(tags_left)):
    if wl_matching.check_context(
        i, tokens,
        context_settings = settings['search_settings']['context_settings'],
        search_terms_incl = search_terms_incl,
Avoid deeply nested control flow statements.
for node in nodes:
    len_node = len(node)

    for j, ngram in enumerate(wl_nlp_utils.ngrams(parallel_unit, len_node)):
        if ngram == tuple(node):
Function wl_spin_boxes_min_max_sync has 6 arguments (exceeds 4 allowed). Consider refactoring.
def wl_spin_boxes_min_max_sync(
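One standard remedy for a long argument list is to group related parameters into a small object. A sketch using a dataclass (the field names are guesses for illustration; the real function's parameters are Qt widgets and differ):

from dataclasses import dataclass

@dataclass
class SpinBoxGroup:
    # Hypothetical parameter object bundling the widgets that travel together
    spin_box_min: object
    spin_box_max: object
    checkbox_min: object = None
    checkbox_max: object = None

def wl_spin_boxes_min_max_sync_refactored(group, *, keep_offset = 1):
    # One structured argument plus keyword-only options replaces six positionals
    ...

wl_spin_boxes_min_max_sync_refactored(SpinBoxGroup(None, None))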
Avoid deeply nested control flow statements.
if settings_limit_searching == _tr('Wl_Worker_Colligation_Extractor', 'None'):
    tags_left = text.tags[max(0, i + window_left) : i]
    tags_right = text.tags[i + ngram_size : i + ngram_size + window_right]
else:
    # Span positions (Left)
Avoid deeply nested control flow statements.
if tuple(tokens[i + j : i + j + len(search_term)]) == tuple(search_term):
    excl_matched = False

    break
# Search terms to be excluded not found in texts
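The inner scan-and-break can usually be flattened with any() over a generator, which also makes the intent (does the excluded term occur anywhere?) explicit. A runnable sketch using a single index in place of the original i + j offset (an assumption about the surrounding loop):

tokens = ['a', 'b', 'c', 'd']
search_term = ['b', 'c']

# True only if no window of tokens matches the excluded search term
excl_matched = not any(
    tuple(tokens[i : i + len(search_term)]) == tuple(search_term)
    for i in range(len(tokens) - len(search_term) + 1)
)
print(excl_matched)  # False: ('b', 'c') occurs in tokens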
Avoid deeply nested control flow statements.
for sentence_seg in sentence:
    for i, token in enumerate(sentence_seg):
        # Convert to strings to ignore tags and punctuation marks, if any, when checking for stop words
        if token.lower() in stop_words:
            sentence_seg[i] = wl_texts.Wl_Token('')
Avoid deeply nested control flow statements.
    for sentence in sentences:
        tokens_multilevel[-1].append(main.nltk_nist_tokenizer.international_tokenize(sentence))
case 'nltk_nltk':
Function wl_spin_boxes_min_max has 6 arguments (exceeds 4 allowed). Consider refactoring.
def wl_spin_boxes_min_max(
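Where a parameter object feels heavy, making the trailing arguments keyword-only is a lighter fix: the count stays the same but every call site names its values. A sketch with an invented signature (the real function's parameters differ):

def wl_spin_boxes_min_max(parent, *, label_min = 'Min:', label_max = 'Max:', val_min = 1, val_max = 100, double = False):
    # The bare * forces callers to write val_min = 5 rather than passing positionally
    return (label_min, val_min), (label_max, val_max), double

print(wl_spin_boxes_min_max(None, val_min = 5, val_max = 50))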
Avoid deeply nested control flow statements.
for item in range(100):
    item = wl_texts.Wl_Token(str(item))
    freq_1, freq_2 = random.sample(range(100), 2)

    freq_files_items[item] = [
Avoid deeply nested control flow statements.
if tr_raw != tr:
    # Do not replace parentheses in file type filters
    if element_src.text not in TRS_FILE_TYPES:
        # Parentheses
        tr = re.sub(r'\s*\(', r'(', tr)
Avoid deeply nested control flow statements.
if self.settings_tags == 'body_tag_settings' and tag_name == '*':
    opening_tag_text = opening_tag_text.replace('*', _tr('Wl_Table_Tags', 'TAG'))
    closing_tag_text = self.model().item(row, 3).text().replace('*', _tr('Wl_Table_Tags', 'TAG'))

    preview.setText(opening_tag_text + _tr('Wl_Table_Tags', 'token') + closing_tag_text)
else:
Avoid deeply nested control flow statements.
if prefer_raw:
    # Always use original tokens
    results_modified.extend(tokens_raw_temp)
# e.g. POS tagging
else:
Avoid deeply nested control flow statements.
if settings['generation_settings']['context_len_unit'] == self.tr('Character'):
    len_context_left = 0
    len_context_right = 0

    left_tokens_raw = []
Avoid deeply nested control flow statements.
if settings_limit_searching == _tr('Wl_Worker_Colligation_Extractor', 'None'):
    tags_left = text.tags[max(0, i + window_left) : max(0, i + window_right + 1)]
else:
    # Span positions (Left)
    for position in range(max(0, i + window_left), max(0, i + window_right + 1)):
Avoid deeply nested control flow statements.
if text.lang in self.main.settings_global['sentiment_analyzers']:
    sentiment_inputs.append(' '.join(
        [*left_tokens_search, *node_tokens_search, *right_tokens_search]
    ))
Avoid deeply nested control flow statements.
for j, count in enumerate(counts):
    self.set_item_num(
        row = self.model().rowCount() - 2,
        col = j,
        val = count
Avoid deeply nested control flow statements.
if re_match(lemma_matched, lemma_search, flags = re_flags):
    tokens_matched[token_matched].add(token)
Avoid deeply nested control flow statements.
for sentence_seg in sentence:
    for i, token in enumerate(sentence_seg):
        # Convert to strings to ignore tags and punctuation marks, if any, when checking for stop words
        if str(token) in stop_words:
            sentence_seg[i] = wl_texts.Wl_Token('')
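For loops like this one (and the near-identical stop-word loop earlier in the report), a guard clause with continue keeps the innermost body shallow without introducing a helper. A runnable sketch with plain strings standing in for wl_texts.Wl_Token:

stop_words = {'the', 'a'}
sentence = [['The', 'cat'], ['a', 'dog', '!']]

for sentence_seg in sentence:
    for i, token in enumerate(sentence_seg):
        # Guard clause: skip non-stop-words early instead of nesting the assignment
        if token.lower() not in stop_words:
            continue

        sentence_seg[i] = ''  # stands in for wl_texts.Wl_Token('')

print(sentence)  # [['', 'cat'], ['', 'dog', '!']]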