Showing 206 of 206 total issues
Avoid deeply nested control flow statements. Open
for item in range(100):
    item = wl_texts.Wl_Token(str(item))
    freq_1, freq_2 = random.sample(range(100), 2)
    freq_files_items[item] = [
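One common way to resolve this kind of nesting is to move the innermost body into a named helper and call it from the enclosing loop. The sketch below is illustrative only: it uses plain strings instead of wl_texts.Wl_Token, and make_random_freqs is an invented name, not Wordless code.

import random

def make_random_freqs(num_items = 100, max_freq = 100):
    # Hypothetical helper: builds the item-to-frequencies mapping in one
    # place so the caller keeps a single loop level.
    freq_files_items = {}

    for item in range(num_items):
        freq_1, freq_2 = random.sample(range(max_freq), 2)
        freq_files_items[str(item)] = [freq_1, freq_2]

    return freq_files_items

freq_files_items = make_random_freqs()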
Avoid deeply nested control flow statements. Open
for token in sentence_seg:
    if wl_checks_tokens.is_punc(token.head):
        token.head = None
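An alternative to the nested if is to iterate over a filtered generator, which keeps the loop body flat. The snippet below is a self-contained sketch: is_punc is a trivial stand-in for wl_checks_tokens.is_punc and the tokens are SimpleNamespace objects, not spaCy tokens.

from types import SimpleNamespace

def is_punc(text):
    # Stand-in for wl_checks_tokens.is_punc, kept trivial so the sketch runs.
    return text in {',', '.', '!', '?'}

sentence_seg = [
    SimpleNamespace(text = 'Hello', head = ','),
    SimpleNamespace(text = 'world', head = 'Hello')
]

# Filtering in the generator removes the inner if block from the loop body.
for token in (token for token in sentence_seg if is_punc(token.head)):
    token.head = None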
Avoid deeply nested control flow statements. Open
for sentence in doc.sents:
    htmls.append(spacy.displacy.render(
        sentence,
        style = 'dep',
        minify = True,
Avoid deeply nested control flow statements. Open
for j, collocate in enumerate(reversed(tags_left)):
    if wl_matching.check_context(
        i, tokens,
        context_settings = settings['search_settings']['context_settings'],
        search_terms_incl = search_terms_incl,
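Since the same keyword arguments are passed to wl_matching.check_context on every iteration, binding them once with functools.partial shrinks the nested call and makes it easier to share between the left and right collocate loops. The stub below stands in for the real function, and the settings and tokens are made up, so the sketch runs on its own.

from functools import partial

def check_context_stub(i, tokens, context_settings, search_terms_incl):
    # Stand-in for wl_matching.check_context; always matches.
    return True

settings = {'search_settings': {'context_settings': {'left': 3, 'right': 3}}}
search_terms_incl = {'corpus'}
tokens = ['a', 'small', 'corpus', 'sample']

# Bind the arguments that never change inside the loop.
check_context = partial(
    check_context_stub,
    context_settings = settings['search_settings']['context_settings'],
    search_terms_incl = search_terms_incl
)

for i, token in enumerate(tokens):
    if check_context(i, tokens):
        pass  # process the collocate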
Avoid deeply nested control flow statements. Open
for sentence in wl_sentence_tokenization.wl_sentence_split(self.main, para):
    self.tokens_multilevel[-1].append([])

    for sentence_seg in wl_sentence_tokenization.wl_sentence_seg_tokenize_tokens(
        self.main,
Avoid deeply nested control flow statements. Open
for j, count in enumerate(counts):
    self.set_item_num(
        row = self.model().rowCount() - 2,
        col = j,
        val = count
Avoid deeply nested control flow statements. Open
for item, trs in TRS_MISC.items():
    if tr == item:
        tr = trs[0]
        break
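This particular loop only checks whether tr matches a key, so it can be replaced by a direct dictionary lookup with no loop at all. The mapping below is illustrative, not the real TRS_MISC contents.

TRS_MISC = {'colour': ['color', 'colours'], 'centre': ['center', 'centres']}  # illustrative contents

tr = 'colour'

# Equivalent to the flagged loop-and-break, minus one nesting level.
if tr in TRS_MISC:
    tr = TRS_MISC[tr][0]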
Avoid deeply nested control flow statements. Open
for doc in nlp.pipe(lines):
    for token in doc:
        texts_tagged.append(token.text)

        if tagset in ['default', 'raw']:
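When only the tokens matter, the doc/token double loop can be flattened with itertools.chain.from_iterable. The fake_pipe generator below is a stand-in for nlp.pipe so the sketch runs without spaCy; the tagset branch is omitted.

from itertools import chain

class FakeToken:
    def __init__(self, text):
        self.text = text

def fake_pipe(lines):
    # Stand-in for nlp.pipe: yields a list of tokens per line.
    for line in lines:
        yield [FakeToken(text) for text in line.split()]

lines = ['a quick test', 'another line']

# One loop level instead of two nested ones.
texts_tagged = [token.text for token in chain.from_iterable(fake_pipe(lines))]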
Function add_header_vert has 6 arguments (exceeds 4 allowed). Consider refactoring. Open
def add_header_vert(
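A common fix for long signatures is to group related parameters into a single object. The field names below are assumptions chosen for illustration, not the actual parameters of Wordless's add_header_vert, and the function body is a placeholder.

from dataclasses import dataclass

@dataclass
class HeaderSettings:
    # Hypothetical grouping of header options so the function signature stays
    # within the 4-argument limit.
    label: str
    is_int: bool = False
    is_cum: bool = False
    is_pct: bool = False

def add_header_vert(table, settings):
    # Placeholder body: record the header options on the table.
    table.append((settings.label, settings.is_int, settings.is_cum, settings.is_pct))

table = []
add_header_vert(table, HeaderSettings(label = 'Frequency', is_int = True))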
Avoid deeply nested control flow statements. Open
if token_properties:
    i_tag_start += len(doc)
else:
Avoid deeply nested control flow statements. Open
for j, collocate in enumerate(tags_right):
    if wl_matching.check_context(
        i, tokens,
        context_settings = settings['search_settings']['context_settings'],
        search_terms_incl = search_terms_incl,
Avoid deeply nested control flow statements. Open
for sentence in doc.sents:
    displacy_dict = spacy.displacy.parse_deps(sentence, options = options)

    if token_properties:
        for token, word in zip(sentence, displacy_dict['words']):
Avoid deeply nested control flow statements. Open
for col in cols:
    row_to_exp.append(self.table.model().item(row, col).text())
Avoid deeply nested control flow statements. Open
if (
    ngram in search_terms
    and wl_matching.check_context(
        j, tokens,
        context_settings = settings['search_settings']['context_settings'],
Avoid deeply nested control flow statements. Open
if re_match(search_term, dependency_relation.display_text(), flags = re_flags):
    search_results.add(dependency_relation)
else:
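When the inner if/else only decides whether to collect an item, a comprehension can replace it and remove one nesting level. In the sketch below, re.match stands in for the project's re_match wrapper, the relations are plain strings rather than dependency-relation objects, and the data is made up.

import re

dependency_relations = ['nsubj(runs, dog)', 'dobj(chases, cat)', 'nsubj(sleeps, cat)']
search_term = r'nsubj'
re_flags = re.IGNORECASE

# Collect matching relations without an explicit if/else inside the loop.
search_results = {
    dependency_relation
    for dependency_relation in dependency_relations
    if re.match(search_term, dependency_relation, flags = re_flags)
}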
Avoid deeply nested control flow statements. Open
if not self.isRowHidden(row):
    item = self.model().item(row, col)
    item.setText(f'{item.val:.{precision_decimals}}')
# Percentages
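A guard clause that skips hidden rows with continue keeps the formatting code at one indentation level. The class and names below are plain stand-ins for the Qt model items, not the Wordless API.

class FakeItem:
    # Minimal stand-in for a table item with a numeric value.
    def __init__(self, val):
        self.val = val
        self.text = ''

    def setText(self, text):
        self.text = text

precision_decimals = 3
hidden_rows = {1}
items = {0: FakeItem(3.14159), 1: FakeItem(2.71828), 2: FakeItem(0.123456)}

for row, item in items.items():
    # Guard clause: bail out early instead of nesting the body in another if.
    if row in hidden_rows:
        continue

    item.setText(f'{item.val:.{precision_decimals}}')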
Avoid deeply nested control flow statements. Open
if settings['token_settings']['punc_marks']:
    node_tokens_search = list(ngram)

    # Remove empty tokens for searching in results
    left_tokens_search = [token for token in copy.deepcopy(left_tokens_raw) if token]
Avoid deeply nested control flow statements. Open
for i, token in enumerate(sentence_seg):
    if wl_checks_tokens.is_word_alphabetic(token):
        sentence_seg[i] = wl_texts.Wl_Token('')
# Numerals
Avoid deeply nested control flow statements. Open
for sentence in doc.sents:
    displacy_dict = spacy.displacy.parse_deps(sentence, options = options)

    if token_properties:
        for token, word in zip(sentence, displacy_dict['words']):
Avoid deeply nested control flow statements. Open
for j in range(11):
    self.set_item_err(row + j, i, text = self.tr('No language support'), alignment_hor = 'right')