Showing 202 of 202 total issues
Avoid deeply nested control flow statements. Open
for file_type, trs in TRS_FILE_TYPES.items():
if tr == file_type:
tr = trs[0]
break
Avoid deeply nested control flow statements. Open
if tuple(tokens[i + j : i + j + len(search_term)]) == tuple(search_term):
incl_matched = True
break
# Search terms to be included not found in texts
Avoid deeply nested control flow statements. Open
for sentence_seg in sentence:
for i, token in enumerate(sentence_seg):
if token.isupper():
sentence_seg[i] = ''
# Title Case
Avoid deeply nested control flow statements. Open
for word in wl_word_tokenization.wl_word_tokenize_flat(main, tr, lang):
add_val_to_trs(trs_lexicon, word, vals)
else:
Avoid deeply nested control flow statements. Open
if token_properties:
i_tag_start += len(doc)
Avoid deeply nested control flow statements. Open
for k, ngram in enumerate(wl_nlp_utils.ngrams(tokens, len_search_term)):
if ngram == search_term:
points.append([x_start + k / text.num_tokens * len_tokens_total, y_start - j])
# Total
points.append([x_start_total + k, 0])
Avoid deeply nested control flow statements. Open
if settings_limit_searching == _tr('wl_colligation_extractor', 'None'):
tags_right = text.tags[i + ngram_size + window_left - 1 : i + ngram_size + window_right]
else:
# Span positions (Right)
for position in range(i + ngram_size + window_left - 1, i + ngram_size + window_right):
Avoid deeply nested control flow statements. Open
for j, collocate in enumerate(tags_right):
if wl_matching.check_context(
i, tokens,
context_settings = settings['search_settings']['context_settings'],
search_terms_incl = search_terms_incl,
Avoid deeply nested control flow statements. Open
for i, token in enumerate(sentence_seg):
if wl_checks_tokens.is_word_alphabetic(token):
sentence_seg[i] = ''
# Numerals
Avoid deeply nested control flow statements. Open
for col in cols:
row_to_exp.append(self.table.model().item(row, col).text())
Avoid deeply nested control flow statements. Open
if self.settings_tags == 'body_tag_settings' and tag_name == '*':
opening_tag_text = opening_tag_text.replace('*', _tr('wl_settings_files', 'TAG'))
closing_tag_text = self.model().item(row, 3).text().replace('*', _tr('wl_settings_files', 'TAG'))
preview.setText(opening_tag_text + _tr('wl_settings_files', 'token') + closing_tag_text)
else:
Avoid deeply nested control flow statements. Open
if any((text in tr for text in [])):
# Flag translation as unfinished to be reviewed manually
unfinished = True
Avoid deeply nested control flow statements. Open
for j, collocate in enumerate(reversed(tags_left)):
if wl_matching.check_context(
i, tokens,
context_settings = settings['search_settings']['context_settings'],
search_terms_incl = search_terms_incl,
Avoid deeply nested control flow statements. Open
for sentence in wl_sentence_tokenization.wl_sentence_split(self.main, text_no_tags):
self.tokens_multilevel[-1].append([])
for sentence_seg in wl_sentence_tokenization.wl_sentence_seg_tokenize_tokens(
self.main,
Avoid deeply nested control flow statements. Open
if token.lemma is not None:
lemmas.append(token.lemma)
else:
lemmas.append(token.text)
else:
Avoid deeply nested control flow statements. Open
if re_match(search_term, dependency_relation.display_text(), flags = re_flags):
search_results.add(dependency_relation)
else:
Avoid deeply nested control flow statements. Open
for node in nodes:
len_node = len(node)
for j, ngram in enumerate(wl_nlp_utils.ngrams(parallel_unit, len_node)):
if ngram == tuple(node):
Avoid deeply nested control flow statements. Open
for tag in re.finditer(re_tags, para):
tags_tokens = self.add_tags_splitting(para[tag_last_end:tag.start()], tags_tokens)
tags_tokens[-1].append(tag.group())
tag_last_end = tag.end()
Avoid deeply nested control flow statements. Open
if i != item_row and item_text == text:
wl_msg_boxes.Wl_Msg_Box_Warning(
self.main,
title = _tr('wl_lists', 'Duplicates Found'),
text = _tr('wl_lists', '''
Avoid deeply nested control flow statements. Open
for sentence in sentences:
tokens_multilevel[-1].append(main.nltk_nist_tokenizer.international_tokenize(sentence))
case 'nltk_nltk':