Showing 204 of 204 total issues
Avoid deeply nested control flow statements. Open
for j, collocate in enumerate(reversed(tags_left)):
if wl_matching.check_context(
i, tokens,
context_settings = settings['search_settings']['context_settings'],
search_terms_incl = search_terms_incl,
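A standard way to flatten a loop whose entire body sits under one condition is to invert the test and continue early, or to lift the loop into a small helper. A minimal, self-contained sketch of the early-continue shape; the names find_in_context, in_context and on_match are placeholders, not Wordless functions:

def find_in_context(items, in_context, on_match):
    # Guard clause: skip non-matching positions instead of nesting the body
    for i, item in enumerate(items):
        if not in_context(i, items):
            continue

        on_match(item)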
Avoid deeply nested control flow statements. Open
if token.lemma is not None:
lemmas.append(token.lemma)
else:
lemmas.append(token.text)
else:
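When the two branches differ only in the value being appended, a conditional expression removes the branch entirely, which is the lightest way to lose a nesting level here. A sketch, assuming the elided outer else does not interact with this append:

lemmas.append(token.lemma if token.lemma is not None else token.text)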
Avoid deeply nested control flow statements. Open
for token, lemma_search in set(zip(tokens, lemmas_search)):
if re_match(lemma_matched, lemma_search, flags = re_flags):
search_results.add(token)
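Because the inner loop only filters and collects, it can be folded into a single generator expression passed to set.update, assuming nothing else happens inside the elided loop body:

search_results.update(
    token
    for token, lemma_search in set(zip(tokens, lemmas_search))
    if re_match(lemma_matched, lemma_search, flags = re_flags)
)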
Avoid deeply nested control flow statements. Open
for i, token in enumerate(sentence_seg):
if wl_checks_tokens.is_word_alphabetic(token):
sentence_seg[i] = ''
# Numerals
Avoid deeply nested control flow statements. Open
if token_properties:
i_tag_start += len(doc)
else:
Avoid deeply nested control flow statements. Open
if (
ngram in search_terms
and wl_matching.check_context(
j, tokens,
context_settings = settings['search_settings']['context_settings'],
Avoid deeply nested control flow statements. Open
if not self.isRowHidden(row):
item = self.model().item(row, col)
item.setText(str(item.val))
# Floats
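Assuming the excerpt sits inside a loop over the table's rows (the enclosing for statement is not shown), inverting the visibility check into a continue keeps the cell update at a single indent level. A sketch with the loop bounds filled in from the standard Qt model API:

for row in range(self.model().rowCount()):
    # Skip hidden rows up front rather than wrapping the body in an if
    if self.isRowHidden(row):
        continue

    item = self.model().item(row, col)
    item.setText(str(item.val))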
Avoid deeply nested control flow statements. Open
for file_type, trs in TRS_FILE_TYPES.items():
if tr == file_type:
tr = trs[0]
break
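Since TRS_FILE_TYPES is already a mapping keyed by file type, the search loop can become a direct membership test and lookup, removing both the loop and the break. A sketch, assuming the dictionary keys are exactly the file_type values compared against:

if tr in TRS_FILE_TYPES:
    tr = TRS_FILE_TYPES[tr][0]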
Avoid deeply nested control flow statements. Open
if settings_limit_searching == _tr('wl_colligation_extractor', 'None'):
tags_left = text.tags[max(0, i + window_left) : max(0, i + window_right + 1)]
else:
# Span positions (Left)
for position in range(max(0, i + window_left), max(0, i + window_right + 1)):
Avoid deeply nested control flow statements. Open
if len_raw_temp_tokens == len_processed_temp_tokens:
results_modified.extend(results_temp)
elif len_raw_temp_tokens < len_processed_temp_tokens:
results_modified.extend(results_temp[:len_raw_temp_tokens])
elif len_raw_temp_tokens > len_processed_temp_tokens:
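The first two branches do the same work if results_temp holds one entry per processed token: slicing to the full length is then a no-op, so the == and < cases collapse into a single <= test, leaving only the truncated > branch to handle separately. A sketch of that collapse, under that assumption:

if len_raw_temp_tokens <= len_processed_temp_tokens:
    # Slicing to the full length is a no-op when the counts are equal
    results_modified.extend(results_temp[:len_raw_temp_tokens])
else:
    # Handle len_raw_temp_tokens > len_processed_temp_tokens as before
    ...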
Avoid deeply nested control flow statements. Open
for tag in re.finditer(re_tags, para):
tags_tokens = self.add_tags_splitting(para[tag_last_end:tag.start()], tags_tokens)
tags_tokens[-1].append(tag.group())
tag_last_end = tag.end()
Avoid deeply nested control flow statements. Open
for sentence_seg in sentence:
for i, token in enumerate(sentence_seg):
if token.isupper():
sentence_seg[i] = ''
# Title Case
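This excerpt, the alphabetic check earlier, and the lowercase check further down all blank out tokens matching some predicate, so the nested loops can be shared through one helper that takes the predicate as a parameter. A sketch; the name clear_matching_tokens is hypothetical:

def clear_matching_tokens(sentence, matches):
    # Replace every token satisfying the predicate with an empty string
    for sentence_seg in sentence:
        for i, token in enumerate(sentence_seg):
            if matches(token):
                sentence_seg[i] = ''

# e.g. clear_matching_tokens(sentence, str.isupper)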
Avoid deeply nested control flow statements. Open
if not self.isRowHidden(row):
item = self.model().item(row, col)
val_cum += item.val
item.setText(f'{val_cum:.{precision_decimals}}')
Avoid deeply nested control flow statements. Open
if not self.isRowHidden(row):
item = self.model().item(row, col)
val_cum += item.val
item.setText(f'{val_cum:.{precision_pcts}%}')
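The two cumulative-total excerpts above differ only in the format applied to the running sum, so they can share a helper parameterized by the format spec, with the hidden-row test turned into a guard clause. A sketch; the helper name and the loop bounds are assumptions:

def update_cumulative_col(table, col, fmt):
    val_cum = 0

    for row in range(table.model().rowCount()):
        # Skip hidden rows instead of nesting the accumulation
        if table.isRowHidden(row):
            continue

        item = table.model().item(row, col)
        val_cum += item.val
        item.setText(format(val_cum, fmt))

# e.g. update_cumulative_col(table, col, f'.{precision_decimals}')
#      update_cumulative_col(table, col, f'.{precision_pcts}%')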
Avoid deeply nested control flow statements. Open
for i, langs in enumerate(langs_nlp_utils):
# Sentence/word tokenization
if i <= 1:
if lang_code_639_3 in langs:
doc_supported_lang += '|✔'
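If neither condition carries an else branch in the elided lines, the two nested ifs can be merged with and, which drops one level. A sketch under that assumption:

for i, langs in enumerate(langs_nlp_utils):
    # Sentence/word tokenization occupies the first two entries
    if i <= 1 and lang_code_639_3 in langs:
        doc_supported_lang += '|✔'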
Avoid deeply nested control flow statements. Open
for sentence_seg in sentence:
for token in sentence_seg:
head = token.head
for i_sentence_seg, sentence_seg in enumerate(sentence):
Avoid deeply nested control flow statements. Open
for sentence_seg in sentence:
for i, token in enumerate(sentence_seg):
if token.islower():
sentence_seg[i] = ''
# Uppercase
Avoid deeply nested control flow statements. Open
for sentence in doc.sents:
htmls.append(spacy.displacy.render(
sentence,
style = 'dep',
minify = True,
Avoid deeply nested control flow statements. Open
for token in copy.deepcopy(parallel_unit):
parallel_unit_tokens_search.append(token)
if token.punc_mark:
parallel_unit_tokens_search.append(wl_texts.Wl_Token(token.punc_mark, lang = token.lang))
Avoid deeply nested control flow statements. Open
for node in nodes:
len_node = len(node)
for j, ngram in enumerate(wl_nlp_utils.ngrams(parallel_unit, len_node)):
if ngram == tuple(node):